KVM: PPC: booke: Add watchdog emulation
author     Bharat Bhushan <r65777@freescale.com>
           Wed, 8 Aug 2012 20:38:19 +0000 (20:38 +0000)
committer  Alexander Graf <agraf@suse.de>
           Mon, 24 Sep 2012 12:50:38 +0000 (14:50 +0200)
This patch adds watchdog emulation to KVM. The watchdog emulation is
enabled via the KVM_ENABLE_CAP(KVM_CAP_PPC_BOOKE_WATCHDOG) ioctl.
A kernel timer is used to emulate the hardware watchdog state machine.
On final watchdog timer expiry, the vcpu exits to QEMU if TCR.WRC is
non-zero. QEMU can then reset or shut down the guest, depending on how
it is configured.
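
For illustration only (not part of this patch), a minimal userspace
sketch of how the capability and the new exit reason might be consumed;
the fd and helper names are hypothetical:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: enable watchdog emulation on one vcpu fd. */
    static int enable_booke_watchdog(int vcpu_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_PPC_BOOKE_WATCHDOG,
            };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }

    /* Hypothetical exit dispatch: final expiry with TCR.WRC != 0. */
    static void handle_watchdog_exit(struct kvm_run *run)
    {
            if (run->exit_reason == KVM_EXIT_WATCHDOG) {
                    /* Reset or shut down the guest, per configuration. */
            }
    }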

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
[bharat.bhushan@freescale.com: reworked patch]
Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
[agraf: adjust to new request framework]
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/reg_booke.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/powerpc.c
include/linux/kvm.h
include/linux/kvm_host.h

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4a5ec8f573c7f1c07481b64816c494c91d314634..51b0ccd56769d8edc82ba15268c8f613a010ab08 100644
@@ -471,6 +471,8 @@ struct kvm_vcpu_arch {
        ulong fault_esr;
        ulong queued_dear;
        ulong queued_esr;
+       spinlock_t wdt_lock;
+       struct timer_list wdt_timer;
        u32 tlbcfg[4];
        u32 mmucfg;
        u32 epr;
@@ -486,6 +488,7 @@ struct kvm_vcpu_arch {
        u8 osi_needed;
        u8 osi_enabled;
        u8 papr_enabled;
+       u8 watchdog_enabled;
        u8 sane;
        u8 cpu_type;
        u8 hcall_needed;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 3dfc437fb9d9623da01aec95422d279aba4cfc28..c06a64b533623d45109b620bd13a814e7d4b2673 100644
@@ -68,6 +68,8 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
 extern void kvmppc_decrementer_func(unsigned long data);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 /* Core-specific hooks */
 
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2d916c4982c5136a71b10f917606757aa743792f..e07e6af5e1ff9938a00468d49c46aaaee7be8ee9 100644
 #define TCR_FIE                0x00800000      /* FIT Interrupt Enable */
 #define TCR_ARE                0x00400000      /* Auto Reload Enable */
 
+#ifdef CONFIG_E500
+#define TCR_GET_WP(tcr)  ((((tcr) & 0xC0000000) >> 30) | \
+                             (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr)  (((tcr) & 0xC0000000) >> 30)
+#endif
+
 /* Bit definitions for the TSR. */
 #define TSR_ENW                0x80000000      /* Enable Next Watchdog */
 #define TSR_WIS                0x40000000      /* WDT Interrupt Status */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3f2a8360c857f1aae94a13631b2eb099a96795a0..e94666566fa93c6e1b9404a8e92d467731afbb35 100644
@@ -411,6 +411,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        int i;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c36493087dbf896b6672ea4016431dfe6593a972..09e8bf33a8c977b8be7214bb7f6ed9e9617485b2 100644
@@ -209,6 +209,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
+{
+       kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
+}
+
+static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
+{
+       clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
+}
+
 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
 {
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -328,6 +338,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
+       case BOOKE_IRQPRIO_WATCHDOG:
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
@@ -407,12 +418,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        return allowed;
 }
 
+/*
+ * Return the number of jiffies until the next timeout.  If the timeout is
+ * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
+ * because the larger value can break the timer APIs.
+ */
+static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
+{
+       u64 tb, wdt_tb, wdt_ticks = 0;
+       u64 nr_jiffies = 0;
+       u32 period = TCR_GET_WP(vcpu->arch.tcr);
+
+       wdt_tb = 1ULL << (63 - period);
+       tb = get_tb();
+       /*
+        * The watchdog timeout will happen when the TB bit corresponding
+        * to the watchdog period toggles from 0 to 1.
+        */
+       if (tb & wdt_tb)
+               wdt_ticks = wdt_tb;
+
+       wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
+
+       /* Convert timebase ticks to jiffies */
+       nr_jiffies = wdt_ticks;
+
+       if (do_div(nr_jiffies, tb_ticks_per_jiffy))
+               nr_jiffies++;
+
+       return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
+}
+
+static void arm_next_watchdog(struct kvm_vcpu *vcpu)
+{
+       unsigned long nr_jiffies;
+       unsigned long flags;
+
+       /*
+        * If TSR_ENW and TSR_WIS are not both set, there is no need to
+        * exit to userspace, so clear the KVM_REQ_WATCHDOG request.
+        */
+       if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
+               clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+
+       spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
+       nr_jiffies = watchdog_next_timeout(vcpu);
+       /*
+        * If the watchdog timeout in jiffies is >= NEXT_TIMER_MAX_DELTA,
+        * do not arm the timer, since such a large value can break the timer APIs.
+        */
+       if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
+               mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
+       else
+               del_timer(&vcpu->arch.wdt_timer);
+       spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
+}
+
+void kvmppc_watchdog_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       u32 tsr, new_tsr;
+       int final;
+
+       do {
+               new_tsr = tsr = vcpu->arch.tsr;
+               final = 0;
+
+               /* Time out event */
+               if (tsr & TSR_ENW) {
+                       if (tsr & TSR_WIS)
+                               final = 1;
+                       else
+                               new_tsr = tsr | TSR_WIS;
+               } else {
+                       new_tsr = tsr | TSR_ENW;
+               }
+       } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
+
+       if (new_tsr & TSR_WIS) {
+               smp_wmb();
+               kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+               kvm_vcpu_kick(vcpu);
+       }
+
+       /*
+        * If this is the final watchdog expiry and some action is
+        * required, then exit to userspace.
+        */
+       if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
+           vcpu->arch.watchdog_enabled) {
+               smp_wmb();
+               kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
+               kvm_vcpu_kick(vcpu);
+       }
+
+       /*
+        * Stop running the watchdog timer after final expiration to
+        * prevent the host from being flooded with timers if the
+        * guest sets a short period.
+        * Timers will resume when TSR/TCR is updated next time.
+        */
+       if (!final)
+               arm_next_watchdog(vcpu);
+}
+
 static void update_timer_ints(struct kvm_vcpu *vcpu)
 {
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);
+
+       if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
+               kvmppc_core_queue_watchdog(vcpu);
+       else
+               kvmppc_core_dequeue_watchdog(vcpu);
 }
 
 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
@@ -466,6 +586,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
                kvmppc_core_flush_tlb(vcpu);
 #endif
 
+       if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+               vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
+               r = 0;
+       }
+
        return r;
 }
 
@@ -995,6 +1120,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        return r;
 }
 
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       /* setup watchdog timer once */
+       spin_lock_init(&vcpu->arch.wdt_lock);
+       setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
+                   (unsigned long)vcpu);
+
+       return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       del_timer_sync(&vcpu->arch.wdt_timer);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        int i;
@@ -1090,7 +1230,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
        }
 
        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
+               u32 old_tsr = vcpu->arch.tsr;
+
                vcpu->arch.tsr = sregs->u.e.tsr;
+
+               if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+                       arm_next_watchdog(vcpu);
+
                update_timer_ints(vcpu);
        }
 
@@ -1251,6 +1397,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
 {
        vcpu->arch.tcr = new_tcr;
+       arm_next_watchdog(vcpu);
        update_timer_ints(vcpu);
 }
 
@@ -1265,6 +1412,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 {
        clear_bits(tsr_bits, &vcpu->arch.tsr);
+
+       /*
+        * We may have stopped the watchdog due to
+        * being stuck on final expiration.
+        */
+       if (tsr_bits & (TSR_ENW | TSR_WIS))
+               arm_next_watchdog(vcpu);
+
        update_timer_ints(vcpu);
 }
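
Aside (illustrative only, not part of the diff): on e500, TCR_GET_WP()
combines WP with WPEXT into a period selector of up to 6 bits, and
watchdog_next_timeout() above waits for timebase bit (63 - period) to
next toggle from 0 to 1. A standalone sketch of that arithmetic, using
a hypothetical helper name:

    /* Timebase ticks until bit (63 - period) next flips from 0 to 1. */
    static unsigned long long ticks_to_next_wdt_event(unsigned long long tb,
                                                      unsigned int period)
    {
            unsigned long long wdt_tb = 1ULL << (63 - period);
            unsigned long long ticks = 0;

            /* Bit currently 1: wait for it to clear and then set again. */
            if (tb & wdt_tb)
                    ticks = wdt_tb;

            /* Remaining ticks until the selected bit becomes 1. */
            ticks += wdt_tb - (tb & (wdt_tb - 1));

            return ticks;
    }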
 
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 12834bb608aba1e41653fa6aaa96f0735d587b80..5a66ade7fd17d264fa722fa774dafcf1e3f66545 100644
@@ -145,6 +145,14 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
                kvmppc_clr_tsr_bits(vcpu, spr_val);
                break;
        case SPRN_TCR:
+               /*
+                * WRC is a 2-bit field that is supposed to preserve its
+                * value once written to non-zero.
+                */
+               if (vcpu->arch.tcr & TCR_WRC_MASK) {
+                       spr_val &= ~TCR_WRC_MASK;
+                       spr_val |= vcpu->arch.tcr & TCR_WRC_MASK;
+               }
                kvmppc_set_tcr(vcpu, spr_val);
                break;
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 54b12af577d05245541f8ec95b5e56900b586b0c..0ffd7d17adc757e11fa935245199ac8fcc5a5792 100644
@@ -300,6 +300,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        switch (ext) {
 #ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
+       case KVM_CAP_PPC_BOOKE_WATCHDOG:
 #else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
@@ -476,6 +477,8 @@ enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+       int ret;
+
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
@@ -484,13 +487,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
 #endif
-
-       return 0;
+       ret = kvmppc_subarch_vcpu_init(vcpu);
+       return ret;
 }
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        kvmppc_mmu_destroy(vcpu);
+       kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -735,6 +739,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
+#ifdef CONFIG_BOOKE
+       case KVM_CAP_PPC_BOOKE_WATCHDOG:
+               r = 0;
+               vcpu->arch.watchdog_enabled = true;
+               break;
+#endif
 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 6be840aafecb0804506891e465e5af0bcf3bc32b..41a9fd5c61d8a0f116967aa76e4d9ff4165d1785 100644
@@ -167,6 +167,7 @@ struct kvm_pit_config {
 #define KVM_EXIT_OSI              18
 #define KVM_EXIT_PAPR_HCALL      19
 #define KVM_EXIT_S390_UCONTROL   20
+#define KVM_EXIT_WATCHDOG         21
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 #define KVM_INTERNAL_ERROR_EMULATION 1
@@ -627,6 +628,7 @@ struct kvm_ppc_smmu_info {
 #ifdef __KVM_HAVE_READONLY_MEM
 #define KVM_CAP_READONLY_MEM 81
 #endif
+#define KVM_CAP_PPC_BOOKE_WATCHDOG 82
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 80bfc880921ea1fd2fe183e9aeb21ea1e4afca09..e1f1d3eb2768bc2cf7c1c21b72d0f04515f65431 100644
@@ -118,6 +118,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_IMMEDIATE_EXIT    15
 #define KVM_REQ_PMU               16
 #define KVM_REQ_PMI               17
+#define KVM_REQ_WATCHDOG          18
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID    0