Merge branch 'next/kvm' into mips-for-linux-next
author     Ralf Baechle <ralf@linux-mips.org>
           Thu, 9 May 2013 15:56:40 +0000 (17:56 +0200)
committer  Ralf Baechle <ralf@linux-mips.org>
           Thu, 9 May 2013 15:56:40 +0000 (17:56 +0200)
42 files changed:
arch/mips/Kbuild
arch/mips/Kconfig
arch/mips/configs/malta_kvm_defconfig [new file with mode: 0644]
arch/mips/configs/malta_kvm_guest_defconfig [new file with mode: 0644]
arch/mips/include/asm/kvm.h [new file with mode: 0644]
arch/mips/include/asm/kvm_host.h [new file with mode: 0644]
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/sn/sn_private.h
arch/mips/include/asm/sn/types.h
arch/mips/include/asm/uaccess.h
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kvm/00README.txt [new file with mode: 0644]
arch/mips/kvm/Kconfig [new file with mode: 0644]
arch/mips/kvm/Makefile [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [new file with mode: 0644]
arch/mips/kvm/kvm_locore.S [new file with mode: 0644]
arch/mips/kvm/kvm_mips.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_comm.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_commpage.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_dyntrans.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_emul.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_opcode.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_stats.c [new file with mode: 0644]
arch/mips/kvm/kvm_tlb.c [new file with mode: 0644]
arch/mips/kvm/kvm_trap_emul.c [new file with mode: 0644]
arch/mips/kvm/trace.h [new file with mode: 0644]
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/tlb-r4k.c
arch/mips/mti-malta/Platform
arch/mips/mti-malta/malta-time.c
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sgi-ip27/ip27-memory.c
virt/kvm/kvm_main.c

diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cfae83759562e43ab20bb03be071ae56ce5..d2cfe45f332b419b5c463b78eb4f44a40e0386c4 100644 (file)
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c1997db9c57c7e52c2ed99ed9b46967f08ec19ce..0cb6f5ffeecd47d5f1cfcff27cbce78f4dc02914 100644 (file)
@@ -1234,6 +1234,7 @@ config CPU_MIPS32_R2
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@ -1734,6 +1735,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+       bool "KVM Guest Kernel"
+       help
+         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+       int "KVM Host Processor Frequency (MHz)"
+       depends on KVM_GUEST
+       default 500
+       help
+         Select this option if building a guest kernel for KVM to skip
+         RTC emulation when determining guest CPU Frequency.  Instead, the guest
+         processor frequency is automatically derived from the host frequency.
+
 choice
        prompt "Kernel page size"
        default PAGE_SIZE_4KB
@@ -2014,6 +2029,7 @@ config SB1_PASS_2_1_WORKAROUNDS
        depends on CPU_SB1 && CPU_SB1_PASS_2
        default y
 
+
 config 64BIT_PHYS_ADDR
        bool
 
@@ -2547,3 +2563,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
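The KVM_HOST_FREQ value above lets a guest kernel take its timer frequency from Kconfig instead of calibrating against the emulated Malta RTC. As a rough sketch only (the real logic is in this series' malta-time.c change and may differ), and assuming the CP0 Count register ticks at half the CPU clock as on most MIPS32 cores, a guest-side estimate could look like the following; the function name is hypothetical:

#ifdef CONFIG_KVM_GUEST
/* Hypothetical sketch: derive the guest timer frequency from the
 * CONFIG_KVM_HOST_FREQ Kconfig value (in MHz) instead of probing the RTC. */
static unsigned int estimate_guest_hpt_freq(void)
{
	unsigned int host_hz = CONFIG_KVM_HOST_FREQ * 1000000;

	/* CP0 Count typically advances at half the pipeline clock. */
	return host_hz / 2;
}
#endif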
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644 (file)
index 0000000..341bb47
--- /dev/null
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
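malta_kvm_defconfig is the host-side configuration: CONFIG_VIRTUALIZATION=y with CONFIG_KVM=m, the MIPS dynamic-translation and COP0-counter debug options, and CONFIG_VHOST_NET=m for accelerated virtio networking. Once the kvm module is loaded, the host exposes /dev/kvm and can be probed with the generic KVM API; a minimal userspace sanity check (not part of this commit) is:

/* Check that /dev/kvm exists and reports the expected KVM API version (12). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");	/* module not loaded or KVM not built */
		return 1;
	}
	printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));
	return 0;
}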
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644 (file)
index 0000000..2b8558b
--- /dev/null
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644 (file)
index 0000000..85789ea
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       __u32 gprs[32];
+       __u32 hi;
+       __u32 lo;
+       __u32 pc;
+
+       __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
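This MIPS kvm.h keeps struct kvm_regs small: the 32 general-purpose registers, hi/lo, pc, and a shadow of the CP0 register file, all 32-bit, while kvm_sregs and kvm_fpu remain empty placeholders. The structure is read and written through the generic KVM_GET_REGS/KVM_SET_REGS vcpu ioctls; the sketch below assumes vcpu_fd is an already-created VCPU file descriptor and trims error handling:

/* Illustrative only: dump the guest pc and stack pointer using the
 * kvm_regs layout declared above (gprs[29] is the MIPS stack pointer). */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_guest_pc(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return;
	printf("guest pc=0x%08x sp=0x%08x\n", regs.pc, regs.gprs[29]);
}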
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..143875c
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS          1
+#define KVM_USER_MEM_SLOTS     8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS  0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES      1
+#define KVM_PAGES_PER_HPAGE(x) 1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR     0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)    ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+                                       ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG             0x00000000UL
+#define KVM_GUEST_KSEG0             0x40000000UL
+#define KVM_GUEST_KSEG23            0x60000000UL
+#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)       (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE            0xdeadbeef
+#define KVM_INVALID_INST            0xdeadbeef
+#define KVM_INVALID_ADDR            0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) (x * 1E6L)
+
+#define CAUSEB_DC       27
+#define CAUSEF_DC       (_ULCAST_(1)   << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 wait_exits;
+       u32 cache_exits;
+       u32 signal_exits;
+       u32 int_exits;
+       u32 cop_unusable_exits;
+       u32 tlbmod_exits;
+       u32 tlbmiss_ld_exits;
+       u32 tlbmiss_st_exits;
+       u32 addrerr_st_exits;
+       u32 addrerr_ld_exits;
+       u32 syscall_exits;
+       u32 resvd_inst_exits;
+       u32 break_inst_exits;
+       u32 flush_dcache_exits;
+       u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+       WAIT_EXITS,
+       CACHE_EXITS,
+       SIGNAL_EXITS,
+       INT_EXITS,
+       COP_UNUSABLE_EXITS,
+       TLBMOD_EXITS,
+       TLBMISS_LD_EXITS,
+       TLBMISS_ST_EXITS,
+       ADDRERR_ST_EXITS,
+       ADDRERR_LD_EXITS,
+       SYSCALL_EXITS,
+       RESVD_INST_EXITS,
+       BREAK_INST_EXITS,
+       FLUSH_DCACHE_EXITS,
+       MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+       /* Guest GVA->HPA page table */
+       unsigned long *guest_pmap;
+       unsigned long guest_pmap_npages;
+
+       /* Wired host TLB used for the commpage */
+       int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+struct mips_coproc {
+       unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define        MIPS_CP0_TLB_INDEX          0
+#define        MIPS_CP0_TLB_RANDOM         1
+#define        MIPS_CP0_TLB_LOW            2
+#define        MIPS_CP0_TLB_LO0            2
+#define        MIPS_CP0_TLB_LO1            3
+#define        MIPS_CP0_TLB_CONTEXT    4
+#define        MIPS_CP0_TLB_PG_MASK    5
+#define        MIPS_CP0_TLB_WIRED          6
+#define        MIPS_CP0_HWRENA             7
+#define        MIPS_CP0_BAD_VADDR          8
+#define        MIPS_CP0_COUNT          9
+#define        MIPS_CP0_TLB_HI         10
+#define        MIPS_CP0_COMPARE            11
+#define        MIPS_CP0_STATUS         12
+#define        MIPS_CP0_CAUSE          13
+#define        MIPS_CP0_EXC_PC         14
+#define        MIPS_CP0_PRID               15
+#define        MIPS_CP0_CONFIG         16
+#define        MIPS_CP0_LLADDR         17
+#define        MIPS_CP0_WATCH_LO           18
+#define        MIPS_CP0_WATCH_HI           19
+#define        MIPS_CP0_TLB_XCONTEXT   20
+#define        MIPS_CP0_ECC                26
+#define        MIPS_CP0_CACHE_ERR          27
+#define        MIPS_CP0_TAG_LO         28
+#define        MIPS_CP0_TAG_HI         29
+#define        MIPS_CP0_ERROR_PC           30
+#define        MIPS_CP0_DEBUG          23
+#define        MIPS_CP0_DEPC               24
+#define        MIPS_CP0_PERFCNT            25
+#define        MIPS_CP0_ERRCTL         26
+#define        MIPS_CP0_DATA_LO            28
+#define        MIPS_CP0_DATA_HI            29
+#define        MIPS_CP0_DESAVE         31
+
+#define MIPS_CP0_CONFIG_SEL        0
+#define MIPS_CP0_CONFIG1_SEL    1
+#define MIPS_CP0_CONFIG2_SEL    2
+#define MIPS_CP0_CONFIG3_SEL    3
+
+/* Config0 register bits */
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   17
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+
+/* Config1 register bits */
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+
+/* Config2 Register bits */
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+
+/* Config3 Register bits */
+#define CP0C3_M    31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI  13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
+#define MIPS_CONFIG0                                              \
+  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, no watch registers, no code compression,
+   EJTAG present, no FPU */
+#define MIPS_CONFIG1                                              \
+((1 << CP0C1_M) |                                                 \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2                                              \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3                                              \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field.  */
+enum mips_mmu_types {
+       MMU_TYPE_NONE,
+       MMU_TYPE_R4000,
+       MMU_TYPE_RESERVED,
+       MMU_TYPE_FMT,
+       MMU_TYPE_R3000,
+       MMU_TYPE_R6000,
+       MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT           0      /* Interrupt pending */
+#define T_TLB_MOD       1      /* TLB modified fault */
+#define T_TLB_LD_MISS       2  /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS       3  /* TLB miss on a store */
+#define T_ADDR_ERR_LD       4  /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST       5  /* Address error on a store */
+#define T_BUS_ERR_IFETCH    6  /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST     7  /* Bus error on a load or store */
+#define T_SYSCALL       8      /* System call */
+#define T_BREAK         9      /* Breakpoint */
+#define T_RES_INST      10     /* Reserved instruction exception */
+#define T_COP_UNUSABLE      11 /* Coprocessor unusable */
+#define T_OVFLOW        12     /* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP          13     /* Trap instruction */
+#define T_VCEI          14     /* Virtual coherency exception */
+#define T_FPE           15     /* Floating point exception */
+#define T_WATCH         23     /* Watch address reference */
+#define T_VCED          31     /* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR          (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1) /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_DR         RESUME_FLAG_DR
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+enum emulation_result {
+       EMULATE_DONE,           /* no further processing */
+       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
+       EMULATE_FAIL,           /* can't emulate this instruction */
+       EMULATE_WAIT,           /* WAIT instruction */
+       EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G  0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V  0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D  0x00000004 /* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT      6
+#define MIPS3_PG_FRAME      0x3fffffc0
+
+#define VPN2_MASK           0xffffe000
+#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+       long tlb_mask;
+       long tlb_hi;
+       long tlb_lo0;
+       long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE     64
+struct kvm_vcpu_arch {
+       void *host_ebase, *guest_ebase;
+       unsigned long host_stack;
+       unsigned long host_gp;
+
+       /* Host CP0 registers used when handling exits from guest */
+       unsigned long host_cp0_badvaddr;
+       unsigned long host_cp0_cause;
+       unsigned long host_cp0_epc;
+       unsigned long host_cp0_entryhi;
+       uint32_t guest_inst;
+
+       /* GPRS */
+       unsigned long gprs[32];
+       unsigned long hi;
+       unsigned long lo;
+       unsigned long pc;
+
+       /* FPU State */
+       struct mips_fpu_struct fpu;
+
+       /* COP0 State */
+       struct mips_coproc *cop0;
+
+       /* Host KSEG0 address of the EI/DI offset */
+       void *kseg0_commpage;
+
+       u32 io_gpr;             /* GPR used as IO source/target */
+
+       /* Used to calibrate the virtual count register for the guest */
+       int32_t host_cp0_count;
+
+       /* Bitmask of exceptions that are pending */
+       unsigned long pending_exceptions;
+
+       /* Bitmask of pending exceptions to be cleared */
+       unsigned long pending_exceptions_clr;
+
+       unsigned long pending_load_cause;
+
+       /* Save/Restore the entryhi register when we are preempted/scheduled back in */
+       unsigned long preempt_entryhi;
+
+       /* S/W Based TLB for guest */
+       struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+       /* Cached guest kernel/user ASIDs */
+       uint32_t guest_user_asid[NR_CPUS];
+       uint32_t guest_kernel_asid[NR_CPUS];
+       struct mm_struct guest_kernel_mm, guest_user_mm;
+
+       struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+       struct hrtimer comparecount_timer;
+
+       int last_sched_cpu;
+
+       /* WAIT executed */
+       int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_cause(cop0, change);           \
+    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_ebase(cop0, change);           \
+    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+       int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+       int (*handle_syscall) (struct kvm_vcpu *vcpu);
+       int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+       int (*handle_break) (struct kvm_vcpu *vcpu);
+       int (*vm_init) (struct kvm *kvm);
+       int (*vcpu_init) (struct kvm_vcpu *vcpu);
+       int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+        gpa_t(*gva_to_gpa) (gva_t gva);
+       void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*queue_io_int) (struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+       void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+       int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+       int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+       int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+       int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
+                                          struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+                                             struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                               struct kvm_mips_tlb *tlb,
+                                               unsigned long *hpa0,
+                                               unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+                                                   uint32_t *opc,
+                                                   struct kvm_run *run,
+                                                   struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+                                    unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                  unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                                   struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+                                                  uint32_t *opc,
+                                                  struct kvm_run *run,
+                                                  struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+                                               uint32_t *opc,
+                                               struct kvm_run *run,
+                                               struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                        struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+                                            uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+                                          uint32_t *opc,
+                                          uint32_t cause,
+                                          struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+                                           uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                                     struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                                  struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
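A worked example of the guest segment macros defined in this header: KVM_GUEST_KSEGX() reduces a guest virtual address to its 1 GB segment base (KUSEG 0x00000000, KSEG0 0x40000000, KSEG23 0x60000000), KVM_GUEST_CPHYSADDR() extracts the guest-physical offset, and KVM_GUEST_KSEG0ADDR() maps a guest-physical address back into the cached segment. The comments show the values the macros compute for one illustrative address:

/* Illustrative values only; the macros come from kvm_host.h above. */
unsigned long gva  = 0x40100000UL;             /* a guest KSEG0 address            */
unsigned long seg  = KVM_GUEST_KSEGX(gva);     /* 0x40000000 == KVM_GUEST_KSEG0    */
unsigned long gpa  = KVM_GUEST_CPHYSADDR(gva); /* 0x00100000, guest physical       */
unsigned long back = KVM_GUEST_KSEG0ADDR(gpa); /* 0x40100000, back into KSEG0      */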
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a75cb0a1038e2f10009d67e657e23ab429..5b2f2e68e57f08210be7d4a98370cb3895111adf 100644 (file)
 #endif
 
 #ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE               _AC(0x40000000, UL)
+#else
 #define CAC_BASE               _AC(0x80000000, UL)
+#endif
 #define IO_BASE                        _AC(0xa0000000, UL)
 #define UNCAC_BASE             _AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE               _AC(0x60000000, UL)
+#else
 #define MAP_BASE               _AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 952701c3ad2e844e70026a8d2b18ece4e2e6e3bb..820116067c101070c6a4c35727d1e4cfb24563ee 100644 (file)
@@ -111,15 +111,21 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+       extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
        if (! ((asid += ASID_INC) & ASID_MASK) ) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
                local_flush_tlb_all();  /* start new asid cycle */
+#endif
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
+
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
index 2a5fa7abb346b8633ddda81ff40766c0e2e47aef..71686c897deaa2651bbb5808ae4192f57ea1f54f 100644 (file)
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE      0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE      0x7fff8000UL
+#endif
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
index 1a2c3025bf2892ba98feb2a338ae2cf18c5c3e4c..fdfae43d8b99e49b4f6b20565f6e97178f00a216 100644 (file)
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
 extern void setup_replication_mask(void);
 extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
 
 #endif /* __ASM_SN_SN_PRIVATE_H */
index c4813d67aec3f5543272a53e1d87e9a1b202e193..6d24d4e8b9ed855cb27a860133e1a541c91870ab 100644 (file)
@@ -19,7 +19,6 @@ typedef signed char   partid_t;       /* partition ID type */
 typedef signed short   moduleid_t;     /* user-visible module number type */
 typedef signed short   cmoduleid_t;    /* kernel compact module id type */
 typedef unsigned char  clusterid_t;    /* Clusterid of the cell */
-typedef unsigned long  pfn_t;
 
 typedef dev_t          vertex_hdl_t;   /* hardware graph vertex handle */
 
index bd87e36bf26a3cc980c73804bbb8a5d009f4d2ef..b46caab453a5aa53d779bf36c2ca94be1e1af9fc 100644 (file)
  */
 #ifdef CONFIG_32BIT
 
-#define __UA_LIMIT     0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 
 #define __UA_ADDR      ".word"
 #define __UA_LA                "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
  * address in this range it's the process's problem, not ours :-)
  */
 
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS      ((mm_segment_t) { 0x80000000UL })
+#define USER_DS                ((mm_segment_t) { 0xC0000000UL })
+#else
 #define KERNEL_DS      ((mm_segment_t) { 0UL })
 #define USER_DS                ((mm_segment_t) { __UA_LIMIT })
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
index 50285b2c7ffe30d5a54d877f7f0cac075dd59f8c..0845091ba480bba5313e32f7e12c2a0b79b83048 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#include <linux/kvm_host.h>
+
 void output_ptreg_defines(void)
 {
        COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
        BLANK();
 }
 #endif
+
+void output_kvm_defines(void)
+{
+       COMMENT(" KVM/MIPS Specific offsets. ");
+       DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+       OFFSET(VCPU_RUN, kvm_vcpu, run);
+       OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+       OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+       OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+       OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+       OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+       OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+       OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+       OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+       OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+       OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+       OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+       OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+       OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+       OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+       OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+       OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+       OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+       OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+       OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+       OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+       OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+       OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+       OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+       OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+       OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+       OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+       OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+       OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+       OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+       OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+       OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+       OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+       OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+       OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+       OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+       OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+       OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+       OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+       OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+       OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+       OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+       OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+       OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+       OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+       OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+       OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+       OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+       OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+       OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+       OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+       BLANK();
+}
index 556a4357d7fc69f79b024fd968ef409bcbe20fb8..97c5a1668e5347bb4a7668882931349cfdac08a6 100644 (file)
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
        __res;                                                          \
 })
 
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE            0x3fff8000UL
+#else
 #define TASK32_SIZE            0x7fff8000UL
+#endif
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
index 07b847d77f5da029dca6a6cbb53198807ff781a8..fd75d714452497259272682cb41863c6b9095eb8 100644 (file)
@@ -118,6 +118,10 @@ int c0_compare_int_usable(void)
        unsigned int delta;
        unsigned int cnt;
 
+#ifdef CONFIG_KVM_GUEST
+       return 1;
+#endif
+
        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
index 66bf4e22d9b9451615cd8386caaf9b8016b96a34..596620dd7ee2dd8bcd2b0c28d21ab2030575dba2 100644 (file)
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
 __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
 {
index 3b98b7b8487f42485b6f68e9e4731b70141c5f86..7a99e60dadbd6da60dd6efdf1138ac8e9d94ff74 100644 (file)
@@ -1712,7 +1712,12 @@ void __init trap_init(void)
                ebase = (unsigned long)
                        __alloc_bootmem(size, 1 << fls(size), 0);
        } else {
-               ebase = CKSEG0;
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0     0x40000000
+               ebase = KVM_GUEST_KSEG0;
+#else
+               ebase = CKSEG0;
+#endif
                if (cpu_has_mips_r2)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644 (file)
index 0000000..51617e4
--- /dev/null
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+    Linux-3.7-rc2 based SMP guests hang due to the following code sequence in the generated TLB handlers:
+       LL/TLBP/SC.  Since the TLBP instruction causes a trap, the reservation gets cleared
+       when we ERET back to the guest.  This causes the guest to hang in an infinite loop.
+       This will be fixed in a future release.
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
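The address-space split in item (2) can be sketched in C as follows. This is illustrative only and not part of the patch: the classify_gva() helper and the region names are invented for the example, while the boundary constants come straight from the layout listed above.

#define GUEST_KSEG0_BASE     0x40000000UL   /* Guest Kernel Unmapped */
#define GUEST_KSEG23_BASE    0x60000000UL   /* Guest Kernel Mapped   */
#define GUEST_KSEG_END       0x80000000UL

enum guest_region {
        GUEST_USER,             /* 0x00000000 - 0x3fffffff, 1GB of guest user space */
        GUEST_KERNEL_UNMAPPED,  /* 0x40000000 - 0x5fffffff */
        GUEST_KERNEL_MAPPED,    /* 0x60000000 - 0x7fffffff */
        GUEST_INVALID,          /* everything above 2GB; the guest runs entirely in UM */
};

static inline enum guest_region classify_gva(unsigned long gva)
{
        if (gva < GUEST_KSEG0_BASE)
                return GUEST_USER;
        if (gva < GUEST_KSEG23_BASE)
                return GUEST_KERNEL_UNMAPPED;
        if (gva < GUEST_KSEG_END)
                return GUEST_KERNEL_MAPPED;
        return GUEST_INVALID;
}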
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644 (file)
index 0000000..2c15590
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select KVM_MMIO
+       ---help---
+         Support for hosting Guest kernels.
+         Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+       bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+       depends on KVM
+       ---help---
+         When running in Trap & Emulate mode, patch privileged
+         instructions to reduce the number of traps.
+
+         If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+       bool "Maintain counters for COP0 accesses"
+       depends on KVM
+       ---help---
+         Maintain statistics for Guest COP0 accesses.
+         A histogram of COP0 accesses is printed when the VM is
+         shutdown.
+
+         If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
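As a hedged sketch of the bookkeeping that KVM_MIPS_DEBUG_COP0_COUNTERS enables: the histogram array and helper names below are invented for illustration, only the Kconfig symbol is taken from the patch.

#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
/* One counter per (COP0 register, select) pair; dumped as a histogram
 * when the VM shuts down. */
static unsigned long cop0_access_hist[32][8];

static inline void count_cop0_access(unsigned int reg, unsigned int sel)
{
        cop0_access_hist[reg & 31][sel & 7]++;
}
#else
static inline void count_cop0_access(unsigned int reg, unsigned int sel) { }
#endif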
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644 (file)
index 0000000..78d87bb
--- /dev/null
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+           kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM)      += kvm.o
+obj-y                  += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644 (file)
index 0000000..dca2aa6
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+    .set    push
+    .set    noreorder
+    .set    noat
+
+    /* k0/k1 not being used in host kernel context */
+       addiu           k1,sp, -PT_SIZE
+    LONG_S         $0, PT_R0(k1)
+    LONG_S             $1, PT_R1(k1)
+    LONG_S             $2, PT_R2(k1)
+    LONG_S             $3, PT_R3(k1)
+
+    LONG_S             $4, PT_R4(k1)
+    LONG_S             $5, PT_R5(k1)
+    LONG_S             $6, PT_R6(k1)
+    LONG_S             $7, PT_R7(k1)
+
+    LONG_S             $8,  PT_R8(k1)
+    LONG_S             $9,  PT_R9(k1)
+    LONG_S             $10, PT_R10(k1)
+    LONG_S             $11, PT_R11(k1)
+    LONG_S             $12, PT_R12(k1)
+    LONG_S             $13, PT_R13(k1)
+    LONG_S             $14, PT_R14(k1)
+    LONG_S             $15, PT_R15(k1)
+    LONG_S             $16, PT_R16(k1)
+    LONG_S             $17, PT_R17(k1)
+
+    LONG_S             $18, PT_R18(k1)
+    LONG_S             $19, PT_R19(k1)
+    LONG_S             $20, PT_R20(k1)
+    LONG_S             $21, PT_R21(k1)
+    LONG_S             $22, PT_R22(k1)
+    LONG_S             $23, PT_R23(k1)
+    LONG_S             $24, PT_R24(k1)
+    LONG_S             $25, PT_R25(k1)
+
+       /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+    LONG_S             $28, PT_R28(k1)
+    LONG_S             $29, PT_R29(k1)
+    LONG_S             $30, PT_R30(k1)
+    LONG_S             $31, PT_R31(k1)
+
+    /* Save hi/lo */
+       mflo            v0
+       LONG_S          v0, PT_LO(k1)
+       mfhi            v1
+       LONG_S          v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0            v0, CP0_STATUS
+       LONG_S          v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0            v1,CP0_ENTRYHI
+       andi            v1, 0xff
+       LONG_S          v1, PT_HOST_ASID(k1)
+
+    /* Save DDATA_LO, will be used to store pointer to vcpu */
+    mfc0        v1, CP0_DDATA_LO
+    LONG_S      v1, PT_HOST_USERLOCAL(k1)
+
+    /* DDATA_LO has pointer to vcpu */
+    mtc0        a1,CP0_DDATA_LO
+
+    /* Offset into vcpu->arch */
+       addiu           k1, a1, VCPU_HOST_ARCH
+
+    /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+    LONG_S      sp, VCPU_HOST_STACK(k1)
+
+    /* Save the kernel gp as well */
+    LONG_S      gp, VCPU_HOST_GP(k1)
+
+       /* Setup status register for running the guest in UM, interrupts are disabled */
+       li                      k0,(ST0_EXL | KSU_USER| ST0_BEV)
+       mtc0            k0,CP0_STATUS
+    ehb
+
+    /* load up the new EBASE */
+    LONG_L      k0, VCPU_GUEST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+    /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+     * but make sure that timer interrupts are enabled
+     */
+    li          k0,(ST0_EXL | KSU_USER | ST0_IE)
+    andi        v0, v0, ST0_IM
+    or          k0, k0, v0
+    mtc0        k0,CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* Now load up the Guest Context from VCPU */
+    LONG_L             $1, VCPU_R1(k1)
+    LONG_L             $2, VCPU_R2(k1)
+    LONG_L             $3, VCPU_R3(k1)
+
+    LONG_L             $4, VCPU_R4(k1)
+    LONG_L             $5, VCPU_R5(k1)
+    LONG_L             $6, VCPU_R6(k1)
+    LONG_L             $7, VCPU_R7(k1)
+
+    LONG_L             $8,  VCPU_R8(k1)
+    LONG_L             $9,  VCPU_R9(k1)
+    LONG_L             $10, VCPU_R10(k1)
+    LONG_L             $11, VCPU_R11(k1)
+    LONG_L             $12, VCPU_R12(k1)
+    LONG_L             $13, VCPU_R13(k1)
+    LONG_L             $14, VCPU_R14(k1)
+    LONG_L             $15, VCPU_R15(k1)
+    LONG_L             $16, VCPU_R16(k1)
+    LONG_L             $17, VCPU_R17(k1)
+    LONG_L             $18, VCPU_R18(k1)
+    LONG_L             $19, VCPU_R19(k1)
+    LONG_L             $20, VCPU_R20(k1)
+    LONG_L             $21, VCPU_R21(k1)
+    LONG_L             $22, VCPU_R22(k1)
+    LONG_L             $23, VCPU_R23(k1)
+    LONG_L             $24, VCPU_R24(k1)
+    LONG_L             $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded up later */
+
+    LONG_L             $28, VCPU_R28(k1)
+    LONG_L             $29, VCPU_R29(k1)
+    LONG_L             $30, VCPU_R30(k1)
+    LONG_L             $31, VCPU_R31(k1)
+
+    /* Restore hi/lo */
+       LONG_L          k0, VCPU_LO(k1)
+       mtlo            k0
+
+       LONG_L          k0, VCPU_HI(k1)
+       mthi            k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+    LONG_L             k0, VCPU_R26(k1)
+    LONG_L             k1, VCPU_R27(k1)
+
+    /* Jump to guest */
+       eret
+       .set    pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+    .set    push
+       .set    noat
+    .set    noreorder
+    mtc0    k0, CP0_ERROREPC    #01: Save guest k0
+    ehb                         #02:
+
+    mfc0    k0, CP0_EBASE       #02: Get EBASE
+    srl     k0, k0, 10          #03: Get rid of CPUNum
+    sll     k0, k0, 10          #04
+    LONG_S  k1, 0x3000(k0)      #05: Save k1 @ offset 0x3000
+    addiu   k0, k0, 0x2000      #06: Exception handler is installed @ offset 0x2000
+       j       k0                                      #07: jump to the function
+       nop                                             #08: branch delay slot
+       .set    pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+    .set    push
+    .set    noat
+    .set    noreorder
+
+    /* Get the VCPU pointer from DDATA_LO */
+    mfc0        k1, CP0_DDATA_LO
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Start saving Guest context to VCPU */
+    LONG_S  $0, VCPU_R0(k1)
+    LONG_S  $1, VCPU_R1(k1)
+    LONG_S  $2, VCPU_R2(k1)
+    LONG_S  $3, VCPU_R3(k1)
+    LONG_S  $4, VCPU_R4(k1)
+    LONG_S  $5, VCPU_R5(k1)
+    LONG_S  $6, VCPU_R6(k1)
+    LONG_S  $7, VCPU_R7(k1)
+    LONG_S  $8, VCPU_R8(k1)
+    LONG_S  $9, VCPU_R9(k1)
+    LONG_S  $10, VCPU_R10(k1)
+    LONG_S  $11, VCPU_R11(k1)
+    LONG_S  $12, VCPU_R12(k1)
+    LONG_S  $13, VCPU_R13(k1)
+    LONG_S  $14, VCPU_R14(k1)
+    LONG_S  $15, VCPU_R15(k1)
+    LONG_S  $16, VCPU_R16(k1)
+    LONG_S  $17, VCPU_R17(k1)
+    LONG_S  $18, VCPU_R18(k1)
+    LONG_S  $19, VCPU_R19(k1)
+    LONG_S  $20, VCPU_R20(k1)
+    LONG_S  $21, VCPU_R21(k1)
+    LONG_S  $22, VCPU_R22(k1)
+    LONG_S  $23, VCPU_R23(k1)
+    LONG_S  $24, VCPU_R24(k1)
+    LONG_S  $25, VCPU_R25(k1)
+
+    /* Guest k0/k1 saved later */
+
+    LONG_S  $28, VCPU_R28(k1)
+    LONG_S  $29, VCPU_R29(k1)
+    LONG_S  $30, VCPU_R30(k1)
+    LONG_S  $31, VCPU_R31(k1)
+
+    /* We need to save hi/lo and restore them on
+     * the way out
+     */
+    mfhi    t0
+    LONG_S  t0, VCPU_HI(k1)
+
+    mflo    t0
+    LONG_S  t0, VCPU_LO(k1)
+
+    /* Finally save guest k0/k1 to VCPU */
+    mfc0    t0, CP0_ERROREPC
+    LONG_S  t0, VCPU_R26(k1)
+
+    /* Get GUEST k1 and save it in VCPU */
+    la      t1, ~0x2ff
+    mfc0    t0, CP0_EBASE
+    and     t0, t0, t1
+    LONG_L  t0, 0x3000(t0)
+    LONG_S  t0, VCPU_R27(k1)
+
+    /* Now that context has been saved, we can use other registers */
+
+    /* Restore vcpu */
+    mfc0        a1, CP0_DDATA_LO
+    move        s1, a1
+
+   /* Restore run (vcpu->run) */
+    LONG_L      a0, VCPU_RUN(a1)
+    /* Save pointer to run in s0, will be saved by the compiler */
+    move        s0, a0
+
+
+    /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+    mfc0    k0,CP0_EPC
+    LONG_S  k0, VCPU_PC(k1)
+
+    mfc0    k0, CP0_BADVADDR
+    LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+    mfc0    k0, CP0_CAUSE
+    LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+    mfc0    k0, CP0_ENTRYHI
+    LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+    /* Now restore the host state just enough to run the handlers */
+
+    /* Switch EBASE to the one used by Linux */
+    /* load up the host EBASE */
+    mfc0        v0, CP0_STATUS
+
+    .set at
+       or          k0, v0, ST0_BEV
+    .set noat
+
+    mtc0        k0, CP0_STATUS
+    ehb
+
+    LONG_L      k0, VCPU_HOST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+
+    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+    .set at
+       and         v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+    or          v0, v0, ST0_CU0
+    .set noat
+    mtc0        v0, CP0_STATUS
+    ehb
+
+    /* Load up host GP */
+    LONG_L  gp, VCPU_HOST_GP(k1)
+
+    /* Need a stack before we can jump to "C" */
+    LONG_L  sp, VCPU_HOST_STACK(k1)
+
+    /* Saved host state */
+    addiu   sp,sp, -PT_SIZE
+
+    /* XXXKYMA do we need to load the host ASID, maybe not because the
+     * kernel entries are marked GLOBAL, need to verify
+     */
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(sp)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+    /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+    /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+    /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+    la          t9,kvm_mips_handle_exit
+    jalr.hb     t9
+    addiu       sp,sp, -CALLFRAME_SIZ           /* BD Slot */
+
+    /* Return from handler Make sure interrupts are disabled */
+    di
+    ehb
+
+    /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+     * while we were handling the exception from the guest, reload k1
+     */
+    move        k1, s1
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+     * or resuming the guest
+     */
+    andi        t0, v0, RESUME_HOST
+    bnez        t0, __kvm_mips_return_to_host
+    nop
+
+__kvm_mips_return_to_guest:
+    /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+    mtc0        s1, CP0_DDATA_LO
+
+    /* Load up the Guest EBASE to minimize the window where BEV is set */
+    LONG_L      t0, VCPU_GUEST_EBASE(k1)
+
+    /* Switch EBASE back to the one used by KVM */
+    mfc0        v1, CP0_STATUS
+    .set at
+       or          k0, v1, ST0_BEV
+    .set noat
+    mtc0        k0, CP0_STATUS
+    ehb
+    mtc0        t0,CP0_EBASE
+
+    /* Setup status register for running guest in UM */
+    .set at
+    or     v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+    and     v1, v1, ~ST0_CU0
+    .set noat
+    mtc0    v1, CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* load the guest context from VCPU and return */
+    LONG_L  $0, VCPU_R0(k1)
+    LONG_L  $1, VCPU_R1(k1)
+    LONG_L  $2, VCPU_R2(k1)
+    LONG_L  $3, VCPU_R3(k1)
+    LONG_L  $4, VCPU_R4(k1)
+    LONG_L  $5, VCPU_R5(k1)
+    LONG_L  $6, VCPU_R6(k1)
+    LONG_L  $7, VCPU_R7(k1)
+    LONG_L  $8, VCPU_R8(k1)
+    LONG_L  $9, VCPU_R9(k1)
+    LONG_L  $10, VCPU_R10(k1)
+    LONG_L  $11, VCPU_R11(k1)
+    LONG_L  $12, VCPU_R12(k1)
+    LONG_L  $13, VCPU_R13(k1)
+    LONG_L  $14, VCPU_R14(k1)
+    LONG_L  $15, VCPU_R15(k1)
+    LONG_L  $16, VCPU_R16(k1)
+    LONG_L  $17, VCPU_R17(k1)
+    LONG_L  $18, VCPU_R18(k1)
+    LONG_L  $19, VCPU_R19(k1)
+    LONG_L  $20, VCPU_R20(k1)
+    LONG_L  $21, VCPU_R21(k1)
+    LONG_L  $22, VCPU_R22(k1)
+    LONG_L  $23, VCPU_R23(k1)
+    LONG_L  $24, VCPU_R24(k1)
+    LONG_L  $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded later */
+    LONG_L  $28, VCPU_R28(k1)
+    LONG_L  $29, VCPU_R29(k1)
+    LONG_L  $30, VCPU_R30(k1)
+    LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+    LONG_L  k0, VCPU_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, VCPU_LO(k1)
+    mtlo    k0
+
+    LONG_L  k0, VCPU_R26(k1)
+    LONG_L  k1, VCPU_R27(k1)
+
+    eret
+
+__kvm_mips_return_to_host:
+    /* EBASE is already pointing to Linux */
+    LONG_L  k1, VCPU_HOST_STACK(k1)
+       addiu   k1,k1, -PT_SIZE
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(k1)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore host ASID */
+    LONG_L      k0, PT_HOST_ASID(sp)
+    andi        k0, 0xff
+    mtc0        k0,CP0_ENTRYHI
+    ehb
+
+    /* Load context saved on the host stack */
+    LONG_L  $0, PT_R0(k1)
+    LONG_L  $1, PT_R1(k1)
+
+    /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code  */
+    sra     k0, v0, 2
+    move    $2, k0
+
+    LONG_L  $3, PT_R3(k1)
+    LONG_L  $4, PT_R4(k1)
+    LONG_L  $5, PT_R5(k1)
+    LONG_L  $6, PT_R6(k1)
+    LONG_L  $7, PT_R7(k1)
+    LONG_L  $8, PT_R8(k1)
+    LONG_L  $9, PT_R9(k1)
+    LONG_L  $10, PT_R10(k1)
+    LONG_L  $11, PT_R11(k1)
+    LONG_L  $12, PT_R12(k1)
+    LONG_L  $13, PT_R13(k1)
+    LONG_L  $14, PT_R14(k1)
+    LONG_L  $15, PT_R15(k1)
+    LONG_L  $16, PT_R16(k1)
+    LONG_L  $17, PT_R17(k1)
+    LONG_L  $18, PT_R18(k1)
+    LONG_L  $19, PT_R19(k1)
+    LONG_L  $20, PT_R20(k1)
+    LONG_L  $21, PT_R21(k1)
+    LONG_L  $22, PT_R22(k1)
+    LONG_L  $23, PT_R23(k1)
+    LONG_L  $24, PT_R24(k1)
+    LONG_L  $25, PT_R25(k1)
+
+    /* Host k0/k1 were not saved */
+
+    LONG_L  $28, PT_R28(k1)
+    LONG_L  $29, PT_R29(k1)
+    LONG_L  $30, PT_R30(k1)
+
+    LONG_L  k0, PT_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, PT_LO(k1)
+    mtlo    k0
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+
+    /* Restore RA, which is the address we will return to */
+    LONG_L  ra, PT_R31(k1)
+    j       ra
+    nop
+
+    .set    pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       ####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
+
+
+/* This routine makes changes to the instruction stream visible to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step       $1
+LEAF(MIPSX(SyncICache))
+    .set    push
+       .set    mips32r2
+    beq     a1, zero, 20f
+    nop
+    addu    a1, a0, a1
+    rdhwr   v0, HW_SYNCI_Step
+    beq     v0, zero, 20f
+    nop
+
+10:
+    synci   0(a0)
+    addu    a0, a0, v0
+    sltu    v1, a0, a1
+    bne     v1, zero, 10b
+    nop
+    sync
+20:
+    jr.hb   ra
+    nop
+    .set pop
+END(MIPSX(SyncICache))
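For readability, the ASID-selection trick used in __kvm_mips_load_asid and __kvm_mips_return_to_guest above can be written out in C. This is a sketch only; the helper name and parameters are invented, and the real logic is the sll/bltz pair in the assembly, which relies on guest kseg0 starting at 0x40000000.

static unsigned long select_guest_asid(unsigned long guest_epc,
                                       unsigned long kernel_asid,
                                       unsigned long user_asid)
{
        /* Shifting the EPC left by one moves the 0x40000000 kernel/user
         * boundary onto the sign bit, so a signed test picks the ASID. */
        if ((long)(guest_epc << 1) < 0)
                return kernel_asid & 0xff;      /* guest kernel address */
        return user_asid & 0xff;                /* guest user address   */
}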
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644 (file)
index 0000000..2e60b1c
--- /dev/null
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait", VCPU_STAT(wait_exits) },
+       { "cache", VCPU_STAT(cache_exits) },
+       { "signal", VCPU_STAT(signal_exits) },
+       { "interrupt", VCPU_STAT(int_exits) },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+       { "tlbmod", VCPU_STAT(tlbmod_exits) },
+       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+       { "syscall", VCPU_STAT(syscall_exits) },
+       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+       { "break_inst", VCPU_STAT(break_inst_exits) },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       int *r = (int *)rtn;
+       *r = 0;
+       return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /* Add a wired entry to the TLB; it is used to map the commpage into the Guest kernel */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+
+       if (kvm->arch.guest_pmap)
+               kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_memory_slot old,
+                                  struct kvm_userspace_memory_region *mem,
+                                  bool user_alloc)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  struct kvm_memory_slot old, bool user_alloc)
+{
+       unsigned long npages = 0;
+       int i, err = 0;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+
+                       kvm_info
+                           ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                            npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++) {
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+                       }
+               }
+       }
+out:
+       return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       extern char mips32_exception[], mips32_exceptionEnd[];
+       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /* Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint) {
+               size = 0x200 + VECTORSPACING * 64;
+       } else {
+               size = 0x200;
+       }
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                gebase + offset,
+                mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+       /* Allocate comm page for guest kernel; a wired TLB entry will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_emulate_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       if (vcpu->arch.guest_ebase)
+               kfree(vcpu->arch.guest_ebase);
+
+       if (vcpu->arch.kseg0_commpage)
+               kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                   struct kvm_guest_debug *dbg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       local_irq_disable();
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq)) {
+               wake_up_interruptible(&dvcpu->wq);
+       }
+
+       return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+       int intr;
+
+       switch (ioctl) {
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       intr = (int)irq.irq;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -EINVAL;
+       }
+
+out:
+       return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                      ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       int ret;
+
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+       return ret;
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       printk("VCPU Register Dump:\n");
+       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
+       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               vcpu->arch.gprs[i] = regs->gprs[i];
+
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               regs->gprs[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+       }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
+       hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+                           ktime_set(0, MS_TO_NS(10)));
+       return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       kvm_mips_init_shadow_tlb(vcpu);
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static
+void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /* Do a privilege check, if in UM most of these exit conditions end up
+        * causing an exception to be delivered to the Guest Kernel
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched()) {
+                       cond_resched();
+               }
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+                       ret = RESUME_HOST;
+               }
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug
+                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                    badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err
+                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                    kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace  */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+        * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+        * to avoid the possibility of double faulting. The issue is that the TLB code
+        * references routines that are part of the KVM module,
+        * which are only available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
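
The exit handler above dispatches on the ExcCode field extracted from the CP0 Cause value and, further down, tests CAUSEF_BD to detect faults taken in a branch delay slot. As a reference, here is a minimal stand-alone sketch of that field decoding, assuming the standard MIPS32 Cause layout (ExcCode in bits 6..2, BD in bit 31); the constants are redefined locally for illustration and are not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Standard MIPS32 Cause register fields (assumed layout, defined locally) */
#define CAUSEB_EXCCODE  2
#define CAUSEF_EXCCODE  (0x1f << CAUSEB_EXCCODE)
#define CAUSEF_BD       (1u << 31)

int main(void)
{
        uint32_t cause = 0x80000020;    /* example: BD set, ExcCode = 8 (Sys) */
        uint32_t exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;

        printf("exccode=%u in_delay_slot=%d\n",
               (unsigned)exccode, !!(cause & CAUSEF_BD));
        return 0;
}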
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644 (file)
index 0000000..a4a8c85
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       struct mips_coproc cop0;        /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644 (file)
index 0000000..3873b1e
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+       memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+       return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644 (file)
index 0000000..96528e2
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                          struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction with a NOP */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into SYNCIs. A little heavy
+ * for just D-cache invalidates, but it avoids an expensive trap.
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                       struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
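
The mfc0/mtc0 translations above rewrite the trapping instruction into a plain LW/SW whose 16-bit immediate is the offset of the shadowed COP0 register within the commpage (which is mapped at guest address 0, so the base register can stay $zero). Below is a small stand-alone sketch of that encoding; the struct is a stand-in with an assumed layout, not the real commpage.

#include <stdio.h>
#include <stdint.h>

#define LW_TEMPLATE  0x8c000000   /* lw rt, imm($zero), as in the patch */

/* Stand-in for the commpage/coprocessor layout (illustrative only) */
struct fake_cop0 { unsigned long reg[32][8]; };
struct fake_commpage { struct fake_cop0 cop0; };

static uint32_t encode_mfc0_as_lw(int rt, int rd, int sel)
{
        static struct fake_commpage page;       /* used only to compute offsets */
        uint32_t off = (uint32_t)((uintptr_t)&page.cop0.reg[rd][sel] -
                                  (uintptr_t)&page);
        uint32_t inst = LW_TEMPLATE;

        inst |= ((uint32_t)rt & 0x1f) << 16;    /* destination GPR */
        inst |= off & 0xffff;                   /* commpage offset as immediate */
        return inst;
}

int main(void)
{
        /* e.g. mfc0 t0, c0_status  ->  lw t0, <offset>($zero) */
        printf("%#010x\n", (unsigned)encode_mfc0_as_lw(8, 12, 0));
        return 0;
}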
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644 (file)
index 0000000..4b6274b
--- /dev/null
@@ -0,0 +1,1829 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address, emulating the branch if required.
+ * This function should only be called when the faulting instruction is in a
+ * branch delay slot (i.e. Cause.BD is set).
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /*
+        * Read the instruction
+        */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /*
+                * jr and jalr are in r_format format.
+                */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32) {
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       } else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /*
+                * These are unconditional and in j_format.
+                */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
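+               /* Fall through */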
+       case j_op:
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /*
+                * These are conditional and in i_format.
+                */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /*
+                * And now the FPA/cp1 branch instructions.
+                */
+       case cop1_op:
+               printk("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       printk("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       printk("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
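
For the j/jal cases in the function above, the successor PC keeps the top four bits of the address following the jump and replaces the rest with the 26-bit target field shifted left by two. A compact stand-alone restatement of that computation (for 32-bit addresses it is equivalent to the shift pair used above):

#include <stdio.h>
#include <stdint.h>

/* PC after a MIPS j/jal at 'epc' whose instruction has 26-bit field 'target' */
static uint32_t jump_target(uint32_t epc, uint32_t target)
{
        /* same effect as: epc += 4; epc >>= 28; epc <<= 28; epc |= target << 2; */
        return ((epc + 4) & 0xf0000000u) | ((target & 0x3ffffffu) << 2);
}

int main(void)
{
        printf("%#010x\n", (unsigned)jump_target(0x80001000u, 0x0100000u));
        return 0;
}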
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/* Every time the Compare register is written to, we need to decide when to fire
+ * the timer that represents timer ticks to the GUEST.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       /* If COUNT is enabled (Cause.DC not set) */
+       if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+               hrtimer_start(&vcpu->arch.comparecount_timer,
+                             ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                      vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /* If we are runnable, then definitely go off to user space to check if any
+                * I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so that
+ * we can catch this if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_FAIL;
+       uint32_t pc = vcpu->arch.pc;
+
+       printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               printk
+                   ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                    pc, index, kvm_read_c0_guest_entryhi(cop0),
+                    kvm_read_c0_guest_entrylo0(cop0),
+                    kvm_read_c0_guest_entrylo1(cop0),
+                    kvm_read_c0_guest_pagemask(cop0));
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+            kvm_read_c0_guest_pagemask(cop0));
+
+       return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
+#if 1
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+       index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               return EMULATE_FAIL;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0),
+            kvm_read_c0_guest_entrylo1(cop0));
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL) {
+               return er;
+       }
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
+
+       /* Verify that the register is valid */
+       if (rd > MIPS_CP0_DESAVE) {
+               printk("Invalid rd: %d\n", rd);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       printk("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+                       break;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+                               vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug
+                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               printk("Invalid TLB Index: %ld",
+                                      vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                      kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid =
+                                   vcpu->arch.gprs[rt] & ASID_MASK;
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+                                   &&
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
+
+                                       kvm_debug
+                                           ("MTCz, change ASID from %#lx to %#lx\n",
+                                            kvm_read_c0_guest_entryhi(cop0) &
+                                            ASID_MASK,
+                                            vcpu->arch.gprs[rt] & ASID_MASK);
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       }
+                       /* Are we writing to COUNT */
+                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* Linux doesn't seem to write into COUNT; we throw an error
+                                * if we notice a write to COUNT
+                                */
+                               /*er = EMULATE_FAIL; */
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_write_c0_guest_compare(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /* Make sure that CU1 and NMI bits are never set */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       printk
+                           ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                            vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* EI or DI: bit 5 of the instruction selects which */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       printk
+                           ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                            vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
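
kvm_mips_emulate_CP0() above starts by slicing the trapping instruction into its copz, rt, rd, sel and CO fields before dispatching. The same decoding as a tiny self-contained helper, for reference (field positions copied from the function; the sample word is an illustrative mfc0 t0, c0_status):

#include <stdio.h>
#include <stdint.h>

struct cp0_fields { unsigned copz, rt, rd, sel, co; };

static struct cp0_fields decode_cp0(uint32_t inst)
{
        struct cp0_fields f;

        f.copz = (inst >> 21) & 0x1f;   /* mfc/mtc/dmfc/... sub-opcode */
        f.rt   = (inst >> 16) & 0x1f;   /* GPR operand */
        f.rd   = (inst >> 11) & 0x1f;   /* CP0 register number */
        f.sel  = inst & 0x7;            /* CP0 register select */
        f.co   = (inst >> 25) & 1;      /* CO bit: tlbwi/tlbwr/eret/wait group */
        return f;
}

int main(void)
{
        struct cp0_fields f = decode_cp0(0x40086000u);  /* mfc0 t0, c0_status */

        printf("copz=%u rt=%u rd=%u sel=%u co=%u\n",
               f.copz, f.rt, f.rd, f.sel, f.co);
        return 0;
}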
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       default:
+               printk("Store not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /*
+        * Rollback PC if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               printk("Load not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+       mips32_SyncICache(CKSEG0ADDR(pa), 32);
+       return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       extern void (*r4k_blast_dcache) (void);
+       extern void (*r4k_blast_icache) (void);
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /* Treat INDEX_INV as a nop; it is basically issued by Linux on startup to
+        * invalidate the caches entirely by stepping through all the ways/indexes
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug
+                   ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                    arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE)
+                       r4k_blast_dcache();
+               else if (cache == MIPS_CACHE_ICACHE)
+                       r4k_blast_icache();
+               else {
+                       printk("%s: unsupported CACHE INDEX operation\n",
+                              __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               }
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+                       goto skip_fault;
+               }
+
+               /* If the address is not in the guest TLB, then give the guest a fault;
+                * the resulting handler will do the right thing
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                                                 (kvm_read_c0_guest_entryhi
+                                                  (cop0) & ASID_MASK));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /* We fault an entry from the guest tlb to the shadow host TLB */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               printk
+                   ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI; not the same, but avoids a trap */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               printk
+                   ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+      dont_update_pc:
+       /*
+        * Rollback PC
+        */
+       vcpu->arch.pc = curr_pc;
+      done:
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD) {
+               opc += 1;
+       }
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               printk("Instruction emulation not supported (%p/%#x)\n", opc,
+                      inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
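
This delivery path (and the TLB exception helpers that follow) always updates the guest Cause register through kvm_change_c0_guest_cause() with a 0xff mask, so only the low byte holding ExcCode is replaced while BD and the interrupt-pending bits are preserved. A stand-alone sketch of that read-modify-write pattern, assuming the macro clears the masked bits and then ORs in the new value:

#include <stdio.h>
#include <stdint.h>

#define CAUSEB_EXCCODE  2
#define T_SYSCALL       8       /* MIPS ExcCode for a syscall exception */

/* Replace only the bits selected by 'mask'; the rest of 'reg' is preserved */
static uint32_t change_under_mask(uint32_t reg, uint32_t mask, uint32_t val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t cause = 0x8000000cu;   /* BD set, old ExcCode = 3 */

        cause = change_under_mask(cause, 0xff, T_SYSCALL << CAUSEB_EXCCODE);
        printf("cause=%#010x exccode=%u\n",
               (unsigned)cause, (unsigned)((cause >> CAUSEB_EXCCODE) & 0x1f));
        return 0;
}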
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+#ifdef DEBUG
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       int index;
+
+       /*
+        * If the address is not in the guest TLB, then we are in trouble
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                    __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
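+/*
+ * Worked example (illustrative): "rdhwr $3, $29" encodes as 0x7c03e83b.
+ * (inst & OPCODE) == SPEC3 and (inst & FUNC) == RDHWR select the decode
+ * path in kvm_mips_handle_ri() below; RD extracts hardware register 29
+ * (UserLocal) and RT names GPR 3 ($v1) as the destination.
+ */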
+
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                  struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               printk("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       printk("RDHWR: Count register\n");
+                       arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:
+#if 1
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+                       /* UserLocal not implemented */
+                       er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+                       break;
+
+               default:
+                       printk("RDHWR not supported\n");
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       } else {
+               printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               er = EMULATE_FAIL;
+       }
+
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+       return er;
+}
+
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               printk("Bad MMIO length: %d\n", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug
+                   ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                    vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                    vcpu->mmio_needed);
+
+done:
+       return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               printk("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /* If we are accessing Guest kernel space, then send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: LD MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /* If we are accessing Guest kernel space, then send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: ST MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       printk("%s: address error ST @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       printk("%s: address error LD @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL) {
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+       }
+       return er;
+}
+
+/* User Address (UA) fault; this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /* KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB.  Check the Guest TLB; if the entry is not there,
+        * send the guest an exception.  The guest exception handler should
+        * then inject an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu,
+                                         (va & VPN2_MASK) |
+                                         (kvm_read_c0_guest_entryhi
+                                          (vcpu->arch.cop0) & ASID_MASK));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       printk("%s: invalid exc code: %d\n", __func__, exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               printk("%s: invalid exc code: %d\n", __func__,
+                                      exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+#ifdef DEBUG
+                       kvm_debug
+                           ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                            tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+                       /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
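
Every guest-exception delivery helper in kvm_mips_emul.c above follows the same
CP0 sequence: save the faulting PC into the guest EPC, set Status.EXL, mirror
the branch-delay bit into the guest Cause, record the ExcCode, and vector to
the guest's general exception entry point (the TLB refill cases additionally
vector to offset 0x0 when EXL is clear).  A condensed sketch of that pattern,
assuming the guest CP0 accessors introduced by this series (illustrative only,
not part of the committed code):

        static void deliver_guest_exception(struct kvm_vcpu *vcpu,
                                            unsigned long cause, uint32_t exccode)
        {
                struct mips_coproc *cop0 = vcpu->arch.cop0;

                if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                        /* Save the faulting PC and enter guest exception mode */
                        kvm_write_c0_guest_epc(cop0, vcpu->arch.pc);
                        kvm_set_c0_guest_status(cop0, ST0_EXL);

                        /* Mirror the branch-delay state into the guest Cause */
                        if (cause & CAUSEF_BD)
                                kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                        else
                                kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
                }

                /* Record the exception code and jump to the general vector */
                kvm_change_c0_guest_cause(cop0, 0xff, exccode << CAUSEB_EXCCODE);
                vcpu->arch.pc = KVM_GUEST_KSEG0 + 0x180;
        }
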
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644 (file)
index 0000000..1e5de16
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /* Set the Cause bits to reflect the pending timer interrupt;
+        * the EXC code will be set when the interrupt is actually
+        * delivered.
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /* Set the Cause bits to reflect the pending IO interrupt;
+        * the EXC code will be set when the interrupt is actually
+        * delivered.
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                          struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                       uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt ??? */
+       if (allowed) {
+
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                     uint32_t cause)
+{
+       return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
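
kvm_mips_irq_deliver_cb() above only raises a queued interrupt when the guest
Status register permits it: interrupts globally enabled (IE), the core not
already at exception or error level (EXL and ERL clear), and the interrupt-mask
bit for that source set.  A minimal sketch of that gate, using the guest CP0
accessors and the mask names from <asm/mipsregs.h> (illustrative only):

        static int guest_irq_allowed(struct mips_coproc *cop0, unsigned long im_mask)
        {
                unsigned long status = kvm_read_c0_guest_status(cop0);

                return (status & ST0_IE) &&               /* global interrupt enable      */
                       !(status & (ST0_EXL | ST0_ERL)) && /* not at exception/error level */
                       (status & im_mask);                /* e.g. IE_IRQ5 for the timer   */
        }
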
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644 (file)
index 0000000..20da7d2
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/* MIPS Exception Priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644 (file)
index 0000000..86d3b4c
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define     mfmcz_op         0x0b      /*  01011  */
+#define     wrpgpr_op        0x0e      /*  01110  */
+
+/*  COP0 opcodes (only if COP0 and CO=1):  */
+#define     wait_op               0x20 /*  100000  */
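+
+/*
+ * Worked example (illustrative): the standard "wait" instruction word
+ * 0x42000020 carries the COP0 major opcode with the CO bit set and a
+ * function field of 0x20 (wait_op).
+ */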
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644 (file)
index 0000000..075904b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                      vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644 (file)
index 0000000..e3f0d9b
--- /dev/null
@@ -0,0 +1,932 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+/* Invalidate TLB entries with a unique, unmapped (CKSEG0) EntryHi per index */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
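+/*
+ * For example, with 4 KiB pages UNIQUE_ENTRYHI(5) is CKSEG0 + 0xa000; kseg0
+ * is unmapped, so such an EntryHi can never match a TLB lookup, which makes
+ * it a safe per-index placeholder when invalidating entries.
+ */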
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+}
+
+inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       printk("HOST TLBs:\n");
+       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       printk("Guest TLBs:\n");
+       printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       volatile struct kvm_mips_tlb tlb;
+
+       printk("Shadow TLBs:\n");
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return;
+
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+       return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
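+/*
+ * Worked example (illustrative): with 4 KiB pages, a gva of
+ * KVM_GUEST_KSEG0 + 0x42abc yields gfn 0x42 and offset 0xabc, so the
+ * result is (guest_pmap[0x42] << PAGE_SHIFT) + 0xabc.
+ */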
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+       unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+       unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       volatile int idx;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               /* Restore the old ASID and re-enable interrupts before bailing out */
+               write_c0_entryhi(old_entryhi);
+               mtc0_tlbw_hazard();
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       if (idx < 0) {
+               idx = read_c0_random() % current_cpu_data.tlbsize;
+               write_c0_index(idx);
+               mtc0_tlbw_hazard();
+       }
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+                 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, idx, read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+                               (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
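+       /* (0x3 << 3) | (1 << 2) | (0x1 << 1): cache coherency attribute 3
+        * (cacheable) in bits 5:3, Dirty (bit 2) and Valid (bit 1) set,
+        * Global (bit 0) clear. */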
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+            vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+            read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+       struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
+                       index = i;
+                       break;
+               }
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+#endif
+
+       return index;
+}
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       volatile int idx;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+       return idx;
+}
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx > 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       if (idx > 0) {
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+       }
+#endif
+
+       return 0;
+}
+
+/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+                               continue;
+                       }
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                       struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       if (!((asid += ASID_INC) & ASID_MASK)) {
+               if (cpu_has_vtag_icache) {
+                       flush_icache_all();
+               }
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_read();
+               tlbw_use_hazard();
+
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+
+}
+
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_ctx = read_c0_entryhi();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+               write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               tlbw_use_hazard();
+       }
+
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+
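+/*
+ * Pre-fill every per-CPU shadow TLB entry with a unique EntryHi that can
+ * never match and zeroed EntryLo values, so stale mappings can never be
+ * loaded from the shadow.
+ */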
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+       int cpu, entry;
+
+       for_each_possible_cpu(cpu) {
+               for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+                           UNIQUE_ENTRYHI(entry);
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+                           read_c0_pagemask();
+#ifdef DEBUG
+                       kvm_debug
+                           ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+                            cpu, entry,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+               }
+       }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+#ifdef DEBUG
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if (((vcpu->arch.
+             guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+                        cpu_context(cpu, current->mm));
+               kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                        cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                        vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+                        vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+       }
+
+       /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+               kvm_mips_flush_host_tlb(0);
+               kvm_shadow_tlb_load(vcpu);
+       }
+#endif
+
+       if (!newasid) {
+               /* If we were preempted while the guest was executing,
+                * reload the pre-empted ASID.
+                */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(vcpu->arch.
+                                        preempt_entryhi & ASID_MASK);
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /* Were we in guest context?  If so, the pre-empted ASID is
+                * no longer valid; set it according to the guest mode
+                * (Kernel/User).
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_kernel_asid[cpu] &
+                                                ASID_MASK);
+                       else
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_user_asid[cpu] &
+                                                ASID_MASK);
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1)) {
+               kvm_shadow_tlb_put(vcpu);
+       }
+#endif
+
+       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+            ASID_VERSION_MASK)) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+
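+/*
+ * Fetch the guest instruction at @opc.  Mapped guest addresses are looked
+ * up in the host TLB and, if absent, faulted in from the guest TLB; guest
+ * KSEG0 addresses are translated straight to a host physical address.
+ */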
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags;
+       uint32_t inst;
+       int index;
+
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       index =
+                           kvm_mips_guest_tlb_lookup(vcpu,
+                                                     ((unsigned long) opc & VPN2_MASK)
+                                                     |
+                                                     (kvm_read_c0_guest_entryhi
+                                                      (cop0) & ASID_MASK));
+                       if (index < 0) {
+                               kvm_err
+                                   ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                    __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                                            &vcpu->arch.
+                                                            guest_tlb[index],
+                                                            NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               paddr =
+                   kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                                        (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644 (file)
index 0000000..466aeef
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
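+/*
+ * GVA -> GPA for trap & emulate: only unmapped segments (CKSEG0/CKSEG1)
+ * can be translated directly; anything else is reported as an error.
+ */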
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if ((kseg == CKSEG0) || (kseg == CKSEG1))
+               gpa = CPHYSADDR(gva);
+       else {
+               printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+       return gpa;
+}
+
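+/*
+ * Coprocessor Unusable exit: a CU1 (FPU) fault is reflected back to the
+ * guest, anything else is handed to the instruction emulator.
+ */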
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       } else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+                * using HIGHMEM. Need to address this in a HIGHMEM kernel
+                */
+               printk
+                   ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               printk
+                   ("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+                * expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err
+                   ("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+#endif
+
+               /* User Address (UA) fault.  This can happen if
+                * (1) the TLB entry is not present/valid in either the guest TLB or
+                *     the shadow host TLB; in this case we pass the fault on to the
+                *     guest kernel and let it handle it, or
+                * (2) the TLB entry is present in the guest TLB but not in the shadow
+                *     host TLB; in this case we inject it from the guest TLB into the
+                *     shadow host TLB.
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu)
+           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+               kvm_debug("Emulate Store to MMIO space\n");
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               er = EMULATE_FAIL;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
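+/*
+ * GET/SET_REGS helpers: copy the fixed set of guest CP0 registers listed
+ * below between the guest context and the kvm_regs cp0reg array.
+ */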
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+       kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+       kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+       kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+       kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+       kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+       kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+       kvm_write_c0_guest_pagemask(cop0,
+                                   regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+       kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+       kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+       return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+       regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+       regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+       regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+       regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+           kvm_read_c0_guest_pagemask(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+       regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+       regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+       return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /* Arch specific setup: initialize the config registers so that the
+        * guest comes up as expected.  For now we emulate a MIPS 24Kc.
+        */
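+       /* PRId 0x00019300: MIPS Technologies, 24K family, revision 0 */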
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size (Config1 MMUSize-1 field, bits 30:25) */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0,
+                                  MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+                                                                      CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Put the vcpu id into the CPUNum field of EBase to support SMP guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+       .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+       .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644 (file)
index 0000000..bc9e0f4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
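+/* One event per guest exit: records the exit reason and the guest PC. */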
+TRACE_EVENT(kvm_exit,
+           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+           TP_ARGS(vcpu, reason),
+           TP_STRUCT__entry(
+                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned int, reason)
+           ),
+
+           TP_fast_assign(
+                       __entry->vcpu = vcpu;
+                       __entry->reason = reason;
+           ),
+
+           TP_printk("[%s]PC: 0x%08lx",
+                     kvm_mips_exit_types_str[__entry->reason],
+                     __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 2078915eacb9c9ff740ec2daa1a65274ad1de7a9..96f4d5a6c21c8d9e5a75fc1f8246b374071df8e3 100644 (file)
@@ -136,7 +136,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
 
 static void __cpuinit r4k_blast_dcache_setup(void)
 {
@@ -264,7 +265,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 }
 
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
 
 static void __cpuinit r4k_blast_icache_setup(void)
 {
index 07cec4407b0c00cf073f406738256bd363913dcf..5aeb3eb0b72f87b5f108a6d287517e53d038be2a 100644 (file)
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
 
 EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
index 493131c81a29b9a1c45bf44a000596ccdf17035f..c643de4c473a8d67115c7f0d304ebe1dc1e8c4ce 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
index 5b548b5a4fcf1f5bf116d6dd59e4c42755193c4c..2cc72c9b38e3ba508a1d9b6596da4403f67e6539 100644 (file)
@@ -3,5 +3,9 @@
 #
 platform-$(CONFIG_MIPS_MALTA)  += mti-malta/
 cflags-$(CONFIG_MIPS_MALTA)    += -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA)      += 0xffffffff80100000
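+# The trap & emulate guest kernel runs unprivileged, so link it at a guest
+# USEG address instead of the usual KSEG0 address.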
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_MALTA)  += 0x0000000040100000
+else
+    load-$(CONFIG_MIPS_MALTA)  += 0xffffffff80100000
+endif
 all-$(CONFIG_MIPS_MALTA)       := $(COMPRESSION_FNAME).bin
index a144b89cf9ba299e5f9fb2bcea5fd179f5ceb413..bc6ac00c0d5712eda01313ebfd399362028f2971 100644 (file)
@@ -76,6 +76,21 @@ static void __init estimate_frequencies(void)
        unsigned int count, start;
        unsigned int giccount = 0, gicstart = 0;
 
+#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
+       unsigned int prid = read_c0_prid() & 0xffff00;
+
+       /*
+        * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+        */
+       count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+               count *= 2;
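+       /* e.g. CONFIG_KVM_HOST_FREQ=500: count = 500000000 >> 3 = 62500000,
+        * doubled to 125000000 on everything except the 20Kc/25Kf. */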
+
+       mips_hpt_frequency = count;
+       return;
+#endif
+
        local_irq_save(flags);
 
        /* Start counter exactly on falling edge of update flag. */
index 1d1919a44e88c350685866d8420b24fa9e168dc9..7a53b1e28a93dfa512da4e5a6dff6f49e9ae9baa 100644 (file)
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
        unsigned long loadbase = REP_BASE;
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
index 3505d08ff2fdd8297848f823b068466331bf9b0a..64d8dab06b01697425e4a6073bf1e1d6ae0bca44 100644 (file)
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
        }
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-       return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+       return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
        nasid_t nasid;
        lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
 
 static void __init szmem(void)
 {
-       pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
+       unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        cnodeid_t node;
 
@@ -390,10 +390,10 @@ static void __init szmem(void)
 
 static void __init node_mem_init(cnodeid_t node)
 {
-       pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-       pfn_t slot_freepfn = node_getfirstfree(node);
+       unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+       unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long bootmap_size;
-       pfn_t start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn;
 
        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@ void __init paging_init(void)
        pagetable_init();
 
        for_each_online_node(node) {
-               pfn_t start_pfn, end_pfn;
+               unsigned long start_pfn, end_pfn;
 
                get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
index f18013f09e68423d68d4033d1e22fe1f8701886b..1fc942048521085a960074affc2663d93a70e93e 100644 (file)
@@ -1981,7 +1981,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.