diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index ec909fcd08ce98b59780a1b22ca2acc82f6ce1a2..71d8856ade64c875ecebd1513f9d12262f012d44 100644 (file)
@@ -131,10 +131,10 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
  * Config:     M, [MT]
  * Config1:    M, [MMUSize-1, C2, MD, PC, WR, CA], FP
  * Config2:    M
- * Config3:    M, MSAP, [BPG], ULRI, [DSP2P, DSPP, CTXTC, ITL, LPA, VEIC,
+ * Config3:    M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
  *             VInt, SP, CDMM, MT, SM, TL]
  * Config4:    M, [VTLBSizeExt, MMUSizeExt]
- * Config5:    [MRP]
+ * Config5:    MRP
  */
 
 static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
@@ -161,7 +161,7 @@ static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
 static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
 {
        unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
-               MIPS_CONF3_ULRI;
+               MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
 
        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -177,7 +177,7 @@ static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
 
 static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
 {
-       return kvm_vz_config5_guest_wrmask(vcpu);
+       return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
 }
 
 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
@@ -353,13 +353,38 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
  * VZ guest timer handling.
  */
 
+/**
+ * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Returns:    true if the VZ GTOffset & real guest CP0_Count should be used
+ *             instead of software emulation of the guest timer, false
+ *             otherwise.
+ */
+static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
+{
+       if (kvm_mips_count_disabled(vcpu))
+               return false;
+
+       /* Chosen frequency must match real frequency */
+       if (mips_hpt_frequency != vcpu->arch.count_hz)
+               return false;
+
+       /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
+       if (current_cpu_data.gtoffset_mask != 0xffffffff)
+               return false;
+
+       return true;
+}
+
 /**
  * _kvm_vz_restore_stimer() - Restore soft timer state.
  * @vcpu:      Virtual CPU.
  * @compare:   CP0_Compare register value, restored by caller.
  * @cause:     CP0_Cause register to restore.
  *
- * Restore VZ state relating to the soft timer.
+ * Restore VZ state relating to the soft timer. The hard timer can be enabled
+ * later.
  */
 static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
                                   u32 cause)
@@ -375,7 +400,47 @@ static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
 }
 
 /**
- * kvm_vz_restore_timer() - Restore guest timer state.
+ * _kvm_vz_restore_htimer() - Restore hard timer state.
+ * @vcpu:      Virtual CPU.
+ * @compare:   CP0_Compare register value, restored by caller.
+ * @cause:     CP0_Cause register to restore.
+ *
+ * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
+ * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
+ */
+static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
+                                  u32 compare, u32 cause)
+{
+       u32 start_count, after_count;
+       ktime_t freeze_time;
+       unsigned long flags;
+
+       /*
+        * Freeze the soft-timer and sync the guest CP0_Count with it. We do
+        * this with interrupts disabled to avoid latency.
+        */
+       local_irq_save(flags);
+       freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
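+       /*
+        * The VZ ASE defines Guest.CP0_Count = Root.CP0_Count + CP0_GTOffset,
+        * so this write makes the guest count resume from start_count.
+        */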
+       write_c0_gtoffset(start_count - read_c0_count());
+       local_irq_restore(flags);
+
+       /* restore guest CP0_Cause, as TI may already be set */
+       back_to_back_c0_hazard();
+       write_gc0_cause(cause);
+
+       /*
+        * The above sequence isn't atomic and would result in lost timer
+        * interrupts if we're not careful. Detect if a timer interrupt is due
+        * and assert it.
+        */
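+       /*
+        * e.g. with start_count = 0xfffffff0, compare = 0x00000010 and
+        * after_count = 0x00000020 the test below reads 0x30 > 0x1f, so a
+        * wrapped CP0_Count crossing CP0_Compare is still detected.
+        */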
+       back_to_back_c0_hazard();
+       after_count = read_gc0_count();
+       if (after_count - start_count > compare - start_count - 1)
+               kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+/**
+ * kvm_vz_restore_timer() - Restore timer state.
  * @vcpu:      Virtual CPU.
  *
  * Restore soft timer state from saved context.
@@ -392,25 +457,248 @@ static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
        _kvm_vz_restore_stimer(vcpu, compare, cause);
 }
 
+/**
+ * kvm_vz_acquire_htimer() - Switch to hard timer state.
+ * @vcpu:      Virtual CPU.
+ *
+ * Restore hard timer state on top of existing soft timer state if possible.
+ *
+ * Since the hard timer won't remain active across preemption, preemption must
+ * be disabled by the caller.
+ */
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
+{
+       u32 gctl0;
+
+       gctl0 = read_c0_guestctl0();
+       if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
+               /* enable guest access to hard timer */
+               write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
+
+               _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
+                                      read_gc0_cause());
+       }
+}
+
+/**
+ * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
+ * @vcpu:      Virtual CPU.
+ * @compare:   Pointer to write compare value to.
+ * @cause:     Pointer to write cause value to.
+ *
+ * Save VZ guest timer state and switch to software emulation of guest CP0
+ * timer. The hard timer must already be in use, so preemption should be
+ * disabled.
+ */
+static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
+                               u32 *out_compare, u32 *out_cause)
+{
+       u32 cause, compare, before_count, end_count;
+       ktime_t before_time;
+
+       compare = read_gc0_compare();
+       *out_compare = compare;
+
+       before_time = ktime_get();
+
+       /*
+        * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
+        * at which no pending timer interrupt is missing.
+        */
+       before_count = read_gc0_count();
+       back_to_back_c0_hazard();
+       cause = read_gc0_cause();
+       *out_cause = cause;
+
+       /*
+        * Record a final CP0_Count which we will transfer to the soft-timer.
+        * This is recorded *after* saving CP0_Cause, so we don't get any timer
+        * interrupts from just after the final CP0_Count point.
+        */
+       back_to_back_c0_hazard();
+       end_count = read_gc0_count();
+
+       /*
+        * The above sequence isn't atomic, so we could miss a timer interrupt
+        * between reading CP0_Cause and end_count. Detect and record any timer
+        * interrupt due between before_count and end_count.
+        */
+       if (end_count - before_count > compare - before_count - 1)
+               kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+       /*
+        * Restore soft-timer, ignoring a small amount of negative drift due to
+        * delay between freeze_hrtimer and setting CP0_GTOffset.
+        */
+       kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
+}
+
 /**
  * kvm_vz_save_timer() - Save guest timer state.
  * @vcpu:      Virtual CPU.
  *
- * Save VZ guest timer state.
+ * Save VZ guest timer state and switch to the soft guest timer if the hard
+ * timer was in use.
  */
 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
-       u32 compare, cause;
+       u32 gctl0, compare, cause;
 
-       compare = read_gc0_compare();
-       cause = read_gc0_cause();
+       gctl0 = read_c0_guestctl0();
+       if (gctl0 & MIPS_GCTL0_GT) {
+               /* disable guest use of hard timer */
+               write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+               /* save hard timer state */
+               _kvm_vz_save_htimer(vcpu, &compare, &cause);
+       } else {
+               compare = read_gc0_compare();
+               cause = read_gc0_cause();
+       }
 
        /* save timer-related state to VCPU context */
        kvm_write_sw_gc0_cause(cop0, cause);
        kvm_write_sw_gc0_compare(cop0, compare);
 }
 
+/**
+ * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
+ * @vcpu:      Virtual CPU.
+ *
+ * Transfers the state of the hard guest timer to the soft guest timer, leaving
+ * guest state intact so it can continue to be used with the soft timer.
+ */
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
+{
+       u32 gctl0, compare, cause;
+
+       preempt_disable();
+       gctl0 = read_c0_guestctl0();
+       if (gctl0 & MIPS_GCTL0_GT) {
+               /* disable guest use of timer */
+               write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+               /* switch to soft timer */
+               _kvm_vz_save_htimer(vcpu, &compare, &cause);
+
+               /* leave soft timer in usable state */
+               _kvm_vz_restore_stimer(vcpu, compare, cause);
+       }
+       preempt_enable();
+}
+
+/**
+ * is_eva_access() - Find whether an instruction is an EVA memory accessor.
+ * @inst:      32-bit instruction encoding.
+ *
+ * Finds whether @inst encodes an EVA memory access instruction, which would
+ * indicate that emulation of it should access the user mode address space
+ * instead of the kernel mode address space. This matters for MUSUK segments
+ * which are TLB mapped for user mode but unmapped for kernel mode.
+ *
+ * Returns:    Whether @inst encodes an EVA accessor instruction.
+ */
+static bool is_eva_access(union mips_instruction inst)
+{
+       if (inst.spec3_format.opcode != spec3_op)
+               return false;
+
+       switch (inst.spec3_format.func) {
+       case lwle_op:
+       case lwre_op:
+       case cachee_op:
+       case sbe_op:
+       case she_op:
+       case sce_op:
+       case swe_op:
+       case swle_op:
+       case swre_op:
+       case prefe_op:
+       case lbue_op:
+       case lhue_op:
+       case lbe_op:
+       case lhe_op:
+       case lle_op:
+       case lwe_op:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * is_eva_am_mapped() - Find whether an access mode is mapped.
+ * @vcpu:      KVM VCPU state.
+ * @am:                3-bit encoded access mode.
+ * @eu:                Segment becomes unmapped and uncached when Status.ERL=1.
+ *
+ * Decode @am to find whether it encodes a mapped segment for the current VCPU
+ * state. Where necessary @eu and the actual instruction causing the fault are
+ * taken into account to make the decision.
+ *
+ * Returns:    Whether the VCPU faulted on a TLB mapped address.
+ */
+static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
+{
+       u32 am_lookup;
+       int err;
+
+       /*
+        * Interpret access control mode. We assume address errors will already
+        * have been caught by the guest, leaving us with:
+        *      AM      UM  SM  KM  31..24 23..16
+        * UK    0 000          Unm   0      0
+        * MK    1 001          TLB   1
+        * MSK   2 010      TLB TLB   1
+        * MUSK  3 011  TLB TLB TLB   1
+        * MUSUK 4 100  TLB TLB Unm   0      1
+        * USK   5 101      Unm Unm   0      0
+        * -     6 110                0      0
+        * UUSK  7 111  Unm Unm Unm   0      0
+        *
+        * We shift a magic value by AM across the sign bit to find if always
+        * TLB mapped, and if not shift by 8 again to find if it depends on KM.
+        */
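+       /*
+        * e.g. AM=1 (MK): 0x70080000 << 1 = 0xe0100000 has the sign bit set,
+        * so the segment is always TLB mapped; AM=4 (MUSUK): 0x70080000 << 4
+        * = 0x00800000 leaves it clear, and the further << 8 = 0x80000000
+        * sets it, so mapping depends on the mode checks below.
+        */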
+       am_lookup = 0x70080000 << am;
+       if ((s32)am_lookup < 0) {
+               /*
+                * MK, MSK, MUSK
+                * Always TLB mapped, unless SegCtl.EU && ERL
+                */
+               if (!eu || !(read_gc0_status() & ST0_ERL))
+                       return true;
+       } else {
+               am_lookup <<= 8;
+               if ((s32)am_lookup < 0) {
+                       union mips_instruction inst;
+                       unsigned int status;
+                       u32 *opc;
+
+                       /*
+                        * MUSUK
+                        * TLB mapped if not in kernel mode
+                        */
+                       status = read_gc0_status();
+                       if (!(status & (ST0_EXL | ST0_ERL)) &&
+                           (status & ST0_KSU))
+                               return true;
+                       /*
+                        * EVA access instructions in kernel
+                        * mode access user address space.
+                        */
+                       opc = (u32 *)vcpu->arch.pc;
+                       if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
+                               opc += 1;
+                       err = kvm_get_badinstr(opc, vcpu, &inst.word);
+                       if (!err && is_eva_access(inst))
+                               return true;
+               }
+       }
+
+       return false;
+}
+
 /**
  * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
  * @vcpu:      KVM VCPU state.
@@ -427,10 +715,58 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                             unsigned long *gpa)
 {
        u32 gva32 = gva;
+       unsigned long segctl;
 
        if ((long)gva == (s32)gva32) {
                /* Handle canonical 32-bit virtual address */
-               if ((s32)gva32 < (s32)0xc0000000) {
+               if (cpu_guest_has_segments) {
+                       unsigned long mask, pa;
+
+                       switch (gva32 >> 29) {
+                       case 0:
+                       case 1: /* CFG5 (1GB) */
+                               segctl = read_gc0_segctl2() >> 16;
+                               mask = (unsigned long)0xfc0000000ull;
+                               break;
+                       case 2:
+                       case 3: /* CFG4 (1GB) */
+                               segctl = read_gc0_segctl2();
+                               mask = (unsigned long)0xfc0000000ull;
+                               break;
+                       case 4: /* CFG3 (512MB) */
+                               segctl = read_gc0_segctl1() >> 16;
+                               mask = (unsigned long)0xfe0000000ull;
+                               break;
+                       case 5: /* CFG2 (512MB) */
+                               segctl = read_gc0_segctl1();
+                               mask = (unsigned long)0xfe0000000ull;
+                               break;
+                       case 6: /* CFG1 (512MB) */
+                               segctl = read_gc0_segctl0() >> 16;
+                               mask = (unsigned long)0xfe0000000ull;
+                               break;
+                       case 7: /* CFG0 (512MB) */
+                               segctl = read_gc0_segctl0();
+                               mask = (unsigned long)0xfe0000000ull;
+                               break;
+                       default:
+                               /*
+                                * GCC 4.9 isn't smart enough to figure out that
+                                * segctl and mask are always initialised.
+                                */
+                               unreachable();
+                       }
+
+                       if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
+                                            segctl & 0x0008))
+                               goto tlb_mapped;
+
+                       /* Unmapped, find guest physical address */
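+                       /*
+                        * The CFG PA field (segctl bits 15:9) supplies
+                        * physical address bits 35:29.
+                        */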
+                       pa = (segctl << 20) & mask;
+                       pa |= gva32 & ~mask;
+                       *gpa = pa;
+                       return 0;
+               } else if ((s32)gva32 < (s32)0xc0000000) {
                        /* legacy unmapped KSeg0 or KSeg1 */
                        *gpa = gva32 & 0x1fffffff;
                        return 0;
@@ -438,6 +774,20 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 #ifdef CONFIG_64BIT
        } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
                /* XKPHYS */
+               if (cpu_guest_has_segments) {
+                       /*
+                        * Each of the 8 regions can be overridden by SegCtl2.XR
+                        * to use SegCtl1.XAM.
+                        */
+                       segctl = read_gc0_segctl2();
+                       if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
+                               segctl = read_gc0_segctl1();
+                               if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
+                                                    0))
+                                       goto tlb_mapped;
+                       }
+
+               }
                /*
                 * Traditionally fully unmapped.
                 * Bits 61:59 specify the CCA, which we can just mask off here.
@@ -449,6 +799,7 @@ static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 #endif
        }
 
+tlb_mapped:
        return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
 }
 
@@ -511,6 +862,41 @@ static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
        return RESUME_HOST;
 }
 
+static unsigned long mips_process_maar(unsigned int op, unsigned long val)
+{
+       /* Mask off unused bits */
+       unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
+
+       if (read_gc0_pagegrain() & PG_ELPA)
+               mask |= 0x00ffffff00000000ull;
+       if (cpu_guest_has_mvh)
+               mask |= MIPS_MAAR_VH;
+
+       /* Set or clear VH */
+       if (op == mtc_op) {
+               /* clear VH */
+               val &= ~MIPS_MAAR_VH;
+       } else if (op == dmtc_op) {
+               /* set VH to match VL */
+               val &= ~MIPS_MAAR_VH;
+               if (val & MIPS_MAAR_VL)
+                       val |= MIPS_MAAR_VH;
+       }
+
+       return val & mask;
+}
+
+static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       val &= MIPS_MAARI_INDEX;
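+       /*
+        * Writing all ones to MAARI selects the highest implemented index,
+        * which software uses to probe the number of MAAR pairs.
+        */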
+       if (val == MIPS_MAARI_INDEX)
+               kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
+       else if (val < ARRAY_SIZE(vcpu->arch.maar))
+               kvm_write_sw_gc0_maari(cop0, val);
+}
+
 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_run *run,
@@ -556,6 +942,22 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                val = read_gc0_compare();
+                       } else if (rd == MIPS_CP0_LLADDR &&
+                                  sel == 0) {          /* LLAddr */
+                               if (cpu_guest_has_rw_llb)
+                                       val = read_gc0_lladdr() &
+                                               MIPS_LLADDR_LLB;
+                               else
+                                       val = 0;
+                       } else if (rd == MIPS_CP0_LLADDR &&
+                                  sel == 1 &&          /* MAAR */
+                                  cpu_guest_has_maar &&
+                                  !cpu_guest_has_dyn_maar) {
+                               /* MAARI must be in range */
+                               BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+                                               ARRAY_SIZE(vcpu->arch.maar));
+                               val = vcpu->arch.maar[
+                                       kvm_read_sw_gc0_maari(cop0)];
                        } else if ((rd == MIPS_CP0_PRID &&
                                    (sel == 0 ||        /* PRid */
                                     sel == 2 ||        /* CDMMBase */
@@ -565,6 +967,10 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                     sel == 3)) ||      /* SRSMap */
                                   (rd == MIPS_CP0_CONFIG &&
                                    (sel == 7)) ||      /* Config7 */
+                                  (rd == MIPS_CP0_LLADDR &&
+                                   (sel == 2) &&       /* MAARI */
+                                   cpu_guest_has_maar &&
+                                   !cpu_guest_has_dyn_maar) ||
                                   (rd == MIPS_CP0_ERRCTL &&
                                    (sel == 0))) {      /* ErrCtl */
                                val = cop0->reg[rd][sel];
@@ -597,12 +1003,39 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 
                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
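+                               /*
+                                * Fall back to the soft timer before
+                                * rebiasing CP0_Count; the hard timer can be
+                                * re-acquired later (e.g. on the next
+                                * vcpu_run).
+                                */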
+                               kvm_vz_lose_htimer(vcpu);
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
+                       } else if (rd == MIPS_CP0_LLADDR &&
+                                  sel == 0) {          /* LLAddr */
+                               /*
+                                * P5600 generates GPSI on guest MTC0 LLAddr.
+                                * Only allow the guest to clear LLB.
+                                */
+                               if (cpu_guest_has_rw_llb &&
+                                   !(val & MIPS_LLADDR_LLB))
+                                       write_gc0_lladdr(0);
+                       } else if (rd == MIPS_CP0_LLADDR &&
+                                  sel == 1 &&          /* MAAR */
+                                  cpu_guest_has_maar &&
+                                  !cpu_guest_has_dyn_maar) {
+                               val = mips_process_maar(inst.c0r_format.rs,
+                                                       val);
+
+                               /* MAARI must be in range */
+                               BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+                                               ARRAY_SIZE(vcpu->arch.maar));
+                               vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
+                                                                       val;
+                       } else if (rd == MIPS_CP0_LLADDR &&
+                                  (sel == 2) &&        /* MAARI */
+                                  cpu_guest_has_maar &&
+                                  !cpu_guest_has_dyn_maar) {
+                               kvm_write_maari(vcpu, val);
                        } else if (rd == MIPS_CP0_ERRCTL &&
                                   (sel == 0)) {        /* ErrCtl */
                                /* ignore the written value */
@@ -672,6 +1105,17 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
        case Index_Writeback_Inv_D:
                flush_dcache_line_indexed(va);
                return EMULATE_DONE;
+       case Hit_Invalidate_I:
+       case Hit_Invalidate_D:
+       case Hit_Writeback_Inv_D:
+               if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
+                       /* We can just flush entire icache */
+                       local_flush_icache_range(0, 0);
+                       return EMULATE_DONE;
+               }
+
+               /* So far, other platforms handle guest hit cache ops natively */
+               break;
        default:
                break;
        };
@@ -835,10 +1279,12 @@ static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
 
                        /* DC bit enabling/disabling timer? */
                        if (change & CAUSEF_DC) {
-                               if (val & CAUSEF_DC)
+                               if (val & CAUSEF_DC) {
+                                       kvm_vz_lose_htimer(vcpu);
                                        kvm_mips_count_disable_cause(vcpu);
-                               else
+                               } else {
                                        kvm_mips_count_enable_cause(vcpu);
+                               }
                        }
 
                        /* Only certain bits are RW to the guest */
@@ -887,6 +1333,18 @@ static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
        return er;
 }
 
+static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
+                                                    struct kvm_vcpu *vcpu)
+{
+       /*
+        * Presumably this is due to MC (guest mode change), so let's trace some
+        * relevant info.
+        */
+       trace_kvm_guest_mode_change(vcpu);
+
+       return EMULATE_DONE;
+}
+
 static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
                                                   struct kvm_vcpu *vcpu)
 {
@@ -972,8 +1430,7 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
                break;
        case MIPS_GCTL0_GEXC_GHFC:
                ++vcpu->stat.vz_ghfc_exits;
-               er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
-                                                      vcpu);
+               er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GPA:
                ++vcpu->stat.vz_gpa_exits;
@@ -1205,6 +1662,26 @@ static u64 kvm_vz_get_one_regs[] = {
        KVM_REG_MIPS_COUNT_HZ,
 };
 
+static u64 kvm_vz_get_one_regs_contextconfig[] = {
+       KVM_REG_MIPS_CP0_CONTEXTCONFIG,
+#ifdef CONFIG_64BIT
+       KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
+#endif
+};
+
+static u64 kvm_vz_get_one_regs_segments[] = {
+       KVM_REG_MIPS_CP0_SEGCTL0,
+       KVM_REG_MIPS_CP0_SEGCTL1,
+       KVM_REG_MIPS_CP0_SEGCTL2,
+};
+
+static u64 kvm_vz_get_one_regs_htw[] = {
+       KVM_REG_MIPS_CP0_PWBASE,
+       KVM_REG_MIPS_CP0_PWFIELD,
+       KVM_REG_MIPS_CP0_PWSIZE,
+       KVM_REG_MIPS_CP0_PWCTL,
+};
+
 static u64 kvm_vz_get_one_regs_kscratch[] = {
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
@@ -1225,6 +1702,14 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
                ++ret;
        if (cpu_guest_has_badinstrp)
                ++ret;
+       if (cpu_guest_has_contextconfig)
+               ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+       if (cpu_guest_has_segments)
+               ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+       if (cpu_guest_has_htw)
+               ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
+       if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
+               ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
        ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
 
        return ret;
@@ -1258,6 +1743,37 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
                        return -EFAULT;
                ++indices;
        }
+       if (cpu_guest_has_contextconfig) {
+               if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
+                                sizeof(kvm_vz_get_one_regs_contextconfig)))
+                       return -EFAULT;
+               indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+       }
+       if (cpu_guest_has_segments) {
+               if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
+                                sizeof(kvm_vz_get_one_regs_segments)))
+                       return -EFAULT;
+               indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+       }
+       if (cpu_guest_has_htw) {
+               if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
+                                sizeof(kvm_vz_get_one_regs_htw)))
+                       return -EFAULT;
+               indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
+       }
+       if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
+               for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
+                       index = KVM_REG_MIPS_CP0_MAAR(i);
+                       if (copy_to_user(indices, &index, sizeof(index)))
+                               return -EFAULT;
+                       ++indices;
+               }
+
+               index = KVM_REG_MIPS_CP0_MAARI;
+               if (copy_to_user(indices, &index, sizeof(index)))
+                       return -EFAULT;
+               ++indices;
+       }
        for (i = 0; i < 6; ++i) {
                if (!cpu_guest_has_kscr(i + 2))
                        continue;
@@ -1323,20 +1839,67 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)read_gc0_context();
                break;
+       case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+               if (!cpu_guest_has_contextconfig)
+                       return -EINVAL;
+               *v = read_gc0_contextconfig();
+               break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                if (!cpu_guest_has_userlocal)
                        return -EINVAL;
                *v = read_gc0_userlocal();
                break;
+#ifdef CONFIG_64BIT
+       case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+               if (!cpu_guest_has_contextconfig)
+                       return -EINVAL;
+               *v = read_gc0_xcontextconfig();
+               break;
+#endif
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)read_gc0_pagemask();
                break;
        case KVM_REG_MIPS_CP0_PAGEGRAIN:
                *v = (long)read_gc0_pagegrain();
                break;
+       case KVM_REG_MIPS_CP0_SEGCTL0:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               *v = read_gc0_segctl0();
+               break;
+       case KVM_REG_MIPS_CP0_SEGCTL1:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               *v = read_gc0_segctl1();
+               break;
+       case KVM_REG_MIPS_CP0_SEGCTL2:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               *v = read_gc0_segctl2();
+               break;
+       case KVM_REG_MIPS_CP0_PWBASE:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               *v = read_gc0_pwbase();
+               break;
+       case KVM_REG_MIPS_CP0_PWFIELD:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               *v = read_gc0_pwfield();
+               break;
+       case KVM_REG_MIPS_CP0_PWSIZE:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               *v = read_gc0_pwsize();
+               break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)read_gc0_wired();
                break;
+       case KVM_REG_MIPS_CP0_PWCTL:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               *v = read_gc0_pwctl();
+               break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)read_gc0_hwrena();
                break;
@@ -1375,7 +1938,15 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
                *v = (long)read_gc0_epc();
                break;
        case KVM_REG_MIPS_CP0_PRID:
-               *v = (long)kvm_read_c0_guest_prid(cop0);
+               switch (boot_cpu_type()) {
+               case CPU_CAVIUM_OCTEON3:
+                       /* Octeon III has a read-only guest.PRid */
+                       *v = read_gc0_prid();
+                       break;
+               default:
+                       *v = (long)kvm_read_c0_guest_prid(cop0);
+                       break;
+               }
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = kvm_vz_read_gc0_ebase();
@@ -1408,6 +1979,19 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
                        return -EINVAL;
                *v = read_gc0_config5();
                break;
+       case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+               if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+                       return -EINVAL;
+               idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+               if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+                       return -EINVAL;
+               *v = vcpu->arch.maar[idx];
+               break;
+       case KVM_REG_MIPS_CP0_MAARI:
+               if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+                       return -EINVAL;
+               *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+               break;
 #ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXT:
                *v = read_gc0_xcontext();
@@ -1478,20 +2062,67 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
        case KVM_REG_MIPS_CP0_CONTEXT:
                write_gc0_context(v);
                break;
+       case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+               if (!cpu_guest_has_contextconfig)
+                       return -EINVAL;
+               write_gc0_contextconfig(v);
+               break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                if (!cpu_guest_has_userlocal)
                        return -EINVAL;
                write_gc0_userlocal(v);
                break;
+#ifdef CONFIG_64BIT
+       case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+               if (!cpu_guest_has_contextconfig)
+                       return -EINVAL;
+               write_gc0_xcontextconfig(v);
+               break;
+#endif
        case KVM_REG_MIPS_CP0_PAGEMASK:
                write_gc0_pagemask(v);
                break;
        case KVM_REG_MIPS_CP0_PAGEGRAIN:
                write_gc0_pagegrain(v);
                break;
+       case KVM_REG_MIPS_CP0_SEGCTL0:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               write_gc0_segctl0(v);
+               break;
+       case KVM_REG_MIPS_CP0_SEGCTL1:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               write_gc0_segctl1(v);
+               break;
+       case KVM_REG_MIPS_CP0_SEGCTL2:
+               if (!cpu_guest_has_segments)
+                       return -EINVAL;
+               write_gc0_segctl2(v);
+               break;
+       case KVM_REG_MIPS_CP0_PWBASE:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               write_gc0_pwbase(v);
+               break;
+       case KVM_REG_MIPS_CP0_PWFIELD:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               write_gc0_pwfield(v);
+               break;
+       case KVM_REG_MIPS_CP0_PWSIZE:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               write_gc0_pwsize(v);
+               break;
        case KVM_REG_MIPS_CP0_WIRED:
                change_gc0_wired(MIPSR6_WIRED_WIRED, v);
                break;
+       case KVM_REG_MIPS_CP0_PWCTL:
+               if (!cpu_guest_has_htw)
+                       return -EINVAL;
+               write_gc0_pwctl(v);
+               break;
        case KVM_REG_MIPS_CP0_HWRENA:
                write_gc0_hwrena(v);
                break;
@@ -1547,7 +2178,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
                write_gc0_epc(v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
-               kvm_write_c0_guest_prid(cop0, v);
+               switch (boot_cpu_type()) {
+               case CPU_CAVIUM_OCTEON3:
+                       /* Octeon III has a guest.PRid, but it's read-only */
+                       break;
+               default:
+                       kvm_write_c0_guest_prid(cop0, v);
+                       break;
+               }
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                kvm_vz_write_gc0_ebase(v);
@@ -1610,6 +2248,19 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
                        write_gc0_config5(v);
                }
                break;
+       case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+               if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+                       return -EINVAL;
+               idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+               if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+                       return -EINVAL;
+               vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
+               break;
+       case KVM_REG_MIPS_CP0_MAARI:
+               if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+                       return -EINVAL;
+               kvm_write_maari(vcpu, v);
+               break;
 #ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXT:
                write_gc0_xcontext(v);
@@ -1845,6 +2496,12 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         */
        kvm_vz_restore_timer(vcpu);
 
+       /* Set MC bit if we want to trace guest mode changes */
+       if (kvm_trace_guest_mode_change)
+               set_c0_guestctl0(MIPS_GCTL0_MC);
+       else
+               clear_c0_guestctl0(MIPS_GCTL0_MC);
+
        /* Don't bother restoring registers multiple times unless necessary */
        if (!all)
                return 0;
@@ -1874,8 +2531,12 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        kvm_restore_gc0_entrylo0(cop0);
        kvm_restore_gc0_entrylo1(cop0);
        kvm_restore_gc0_context(cop0);
+       if (cpu_guest_has_contextconfig)
+               kvm_restore_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
        kvm_restore_gc0_xcontext(cop0);
+       if (cpu_guest_has_contextconfig)
+               kvm_restore_gc0_xcontextconfig(cop0);
 #endif
        kvm_restore_gc0_pagemask(cop0);
        kvm_restore_gc0_pagegrain(cop0);
@@ -1912,11 +2573,33 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (cpu_guest_has_badinstrp)
                kvm_restore_gc0_badinstrp(cop0);
 
+       if (cpu_guest_has_segments) {
+               kvm_restore_gc0_segctl0(cop0);
+               kvm_restore_gc0_segctl1(cop0);
+               kvm_restore_gc0_segctl2(cop0);
+       }
+
+       /* restore HTW registers */
+       if (cpu_guest_has_htw) {
+               kvm_restore_gc0_pwbase(cop0);
+               kvm_restore_gc0_pwfield(cop0);
+               kvm_restore_gc0_pwsize(cop0);
+               kvm_restore_gc0_pwctl(cop0);
+       }
+
        /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
        if (cpu_has_guestctl2)
                write_c0_guestctl2(
                        cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
 
+       /*
+        * Clear the linked load bit to break interrupted atomics. This
+        * prevents an SC on the next VCPU from succeeding by matching an LL
+        * on the previous VCPU.
+        */
+       if (cpu_guest_has_rw_llb)
+               write_gc0_lladdr(0);
+
        return 0;
 }
 
@@ -1933,8 +2616,12 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
        kvm_save_gc0_entrylo0(cop0);
        kvm_save_gc0_entrylo1(cop0);
        kvm_save_gc0_context(cop0);
+       if (cpu_guest_has_contextconfig)
+               kvm_save_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
        kvm_save_gc0_xcontext(cop0);
+       if (cpu_guest_has_contextconfig)
+               kvm_save_gc0_xcontextconfig(cop0);
 #endif
        kvm_save_gc0_pagemask(cop0);
        kvm_save_gc0_pagegrain(cop0);
@@ -1991,6 +2678,21 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
        if (cpu_guest_has_badinstrp)
                kvm_save_gc0_badinstrp(cop0);
 
+       if (cpu_guest_has_segments) {
+               kvm_save_gc0_segctl0(cop0);
+               kvm_save_gc0_segctl1(cop0);
+               kvm_save_gc0_segctl2(cop0);
+       }
+
+       /* save HTW registers if enabled in guest */
+       if (cpu_guest_has_htw &&
+           kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+               kvm_save_gc0_pwbase(cop0);
+               kvm_save_gc0_pwfield(cop0);
+               kvm_save_gc0_pwsize(cop0);
+               kvm_save_gc0_pwctl(cop0);
+       }
+
        kvm_vz_save_timer(vcpu);
 
        /* save Root.GuestCtl2 in unused Guest guestctl2 register */
@@ -2071,37 +2773,73 @@ static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
 static int kvm_vz_hardware_enable(void)
 {
        unsigned int mmu_size, guest_mmu_size, ftlb_size;
+       u64 guest_cvmctl, cvmvmconfig;
+
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON3:
+               /* Set up guest timer/perfcount IRQ lines */
+               guest_cvmctl = read_gc0_cvmctl();
+               guest_cvmctl &= ~CVMCTL_IPTI;
+               guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
+               guest_cvmctl &= ~CVMCTL_IPPCI;
+               guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
+               write_gc0_cvmctl(guest_cvmctl);
+
+               cvmvmconfig = read_c0_cvmvmconfig();
+               /* No I/O hole translation. */
+               cvmvmconfig |= CVMVMCONF_DGHT;
+               /* Halve the root MMU size */
+               mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+                           >> CVMVMCONF_MMUSIZEM1_S) + 1;
+               guest_mmu_size = mmu_size / 2;
+               mmu_size -= guest_mmu_size;
+               cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+               cvmvmconfig |= mmu_size - 1;
+               write_c0_cvmvmconfig(cvmvmconfig);
+
+               /* Update our records */
+               current_cpu_data.tlbsize = mmu_size;
+               current_cpu_data.tlbsizevtlb = mmu_size;
+               current_cpu_data.guest.tlbsize = guest_mmu_size;
+
+               /* Flush moved entries in new (guest) context */
+               kvm_vz_local_flush_guesttlb_all();
+               break;
+       default:
+               /*
+                * ImgTec cores tend to use a shared root/guest TLB. To avoid
+                * overlap of root wired and guest entries, the guest TLB may
+                * need resizing.
+                */
+               mmu_size = current_cpu_data.tlbsizevtlb;
+               ftlb_size = current_cpu_data.tlbsize - mmu_size;
 
-       /*
-        * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap of
-        * root wired and guest entries, the guest TLB may need resizing.
-        */
-       mmu_size = current_cpu_data.tlbsizevtlb;
-       ftlb_size = current_cpu_data.tlbsize - mmu_size;
-
-       /* Try switching to maximum guest VTLB size for flush */
-       guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
-       current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
-       kvm_vz_local_flush_guesttlb_all();
+               /* Try switching to maximum guest VTLB size for flush */
+               guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
+               current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+               kvm_vz_local_flush_guesttlb_all();
 
-       /*
-        * Reduce to make space for root wired entries and at least 2 root
-        * non-wired entries. This does assume that long-term wired entries
-        * won't be added later.
-        */
-       guest_mmu_size = mmu_size - num_wired_entries() - 2;
-       guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
-       current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+               /*
+                * Reduce to make space for root wired entries and at least 2
+                * root non-wired entries. This does assume that long-term wired
+                * entries won't be added later.
+                */
+               guest_mmu_size = mmu_size - num_wired_entries() - 2;
+               guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
+               current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
 
-       /*
-        * Write the VTLB size, but if another CPU has already written, check it
-        * matches or we won't provide a consistent view to the guest. If this
-        * ever happens it suggests an asymmetric number of wired entries.
-        */
-       if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
-           WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
-                "Available guest VTLB size mismatch"))
-               return -EINVAL;
+               /*
+                * Write the VTLB size, but if another CPU has already written,
+                * check it matches or we won't provide a consistent view to the
+                * guest. If this ever happens it suggests an asymmetric number
+                * of wired entries.
+                */
+               if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
+                   WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
+                        "Available guest VTLB size mismatch"))
+                       return -EINVAL;
+               break;
+       }
 
        /*
         * Enable virtualization features granting guest direct control of
@@ -2138,8 +2876,36 @@ static int kvm_vz_hardware_enable(void)
 
 static void kvm_vz_hardware_disable(void)
 {
+       u64 cvmvmconfig;
+       unsigned int mmu_size;
+
+       /* Flush any remaining guest TLB entries */
        kvm_vz_local_flush_guesttlb_all();
 
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON3:
+               /*
+                * Allocate whole TLB for root. Existing guest TLB entries will
+                * change ownership to the root TLB. We should be safe though as
+                * they've already been flushed above while in guest TLB.
+                */
+               cvmvmconfig = read_c0_cvmvmconfig();
+               mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+                           >> CVMVMCONF_MMUSIZEM1_S) + 1;
+               cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+               cvmvmconfig |= mmu_size - 1;
+               write_c0_cvmvmconfig(cvmvmconfig);
+
+               /* Update our records */
+               current_cpu_data.tlbsize = mmu_size;
+               current_cpu_data.tlbsizevtlb = mmu_size;
+               current_cpu_data.guest.tlbsize = 0;
+
+               /* Flush moved entries in new (root) context */
+               local_flush_tlb_all();
+               break;
+       }
+
        if (cpu_has_guestid) {
                write_c0_guestctl1(0);
                kvm_vz_local_flush_roottlb_all_guests();
@@ -2298,6 +3064,35 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
                kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
        }
 
+       if (cpu_guest_has_contextconfig) {
+               /* ContextConfig */
+               kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
+#ifdef CONFIG_64BIT
+               /* XContextConfig */
+               /* bits SEGBITS-13+3:4 set */
+               kvm_write_sw_gc0_xcontextconfig(cop0,
+                                       ((1ull << (cpu_vmbits - 13)) - 1) << 4);
+#endif
+       }
+
+       /* Implementation dependent, use the legacy layout */
+       if (cpu_guest_has_segments) {
+               /* SegCtl0, SegCtl1, SegCtl2 */
+               kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
+               kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
+                               (_page_cachable_default >> _CACHE_SHIFT) <<
+                                               (16 + MIPS_SEGCFG_C_SHIFT));
+               kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
+       }
+
+       /* reset HTW registers */
+       if (cpu_guest_has_htw && cpu_has_mips_r6) {
+               /* PWField */
+               kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
+               /* PWSize */
+               kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
+       }
+
        /* start with no pending virtual guest interrupts */
        if (cpu_has_guestctl2)
                cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
@@ -2355,6 +3150,7 @@ static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
        int cpu = smp_processor_id();
        int r;
 
+       kvm_vz_acquire_htimer(vcpu);
        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());