				     ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
				     ulong *val);
-extern void kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
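The prototype change above defines the contract the hunks below implement: a positive return means all pending requests were handled and the guest can be re-entered, anything else refuses entry. Summarized as a comment one might attach to the declaration (wording mine, not part of the patch):

/*
 * kvmppc_core_check_requests() return value:
 *   > 0  - all pending requests handled, retry guest entry
 *   <= 0 - entry refused; break out to the host with r as the verdict
 */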
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}
-void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
+	int r = 1; /* Indicate we want to get back into the guest */
+
	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+	return r;
}
/************* MMU Notifiers *************/
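The book3s handler itself never lowers r yet, so behaviour is unchanged. What the int return makes possible is a handler that vetoes entry; a hypothetical example (KVM_REQ_FOO is a placeholder and the exit reason is illustrative only, neither is part of this patch):

	if (kvm_check_request(KVM_REQ_FOO, vcpu)) {	/* hypothetical request */
		vcpu->run->exit_reason = KVM_EXIT_INTR;	/* illustrative exit reason */
		r = 0;	/* <= 0: refuse guest entry, fall back to the host */
	}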
	return r;
}
-void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
+	int r = 1; /* Indicate we want to get back into the guest */
+
	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif
+
+	return r;
}
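The booke handler likewise keeps r at 1: timer and TLB flush requests never need to block guest entry. A request that must surface to userspace would be the natural first user of the new return value; a sketch of that shape, assuming a watchdog request/exit pair (KVM_REQ_WATCHDOG, KVM_EXIT_WATCHDOG) as the example:

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {	/* assumed request */
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;	/* assumed exit reason */
		r = 0;	/* refuse entry so userspace sees the exit */
	}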
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
		/* Make sure we process requests preemptibly */
		local_irq_enable();
		trace_kvm_check_requests(vcpu);
-		kvmppc_core_check_requests(vcpu);
+		r = kvmppc_core_check_requests(vcpu);
		local_irq_disable();
-		continue;
+		if (r > 0)
+			continue;
+		break;
	}

	if (kvmppc_core_prepare_to_enter(vcpu)) {
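To spell out the new control flow in the loop above (summary mine): r > 0 loops back and re-checks vcpu->requests, while r <= 0 breaks out with the handler's verdict still in r, which the code after the loop can then propagate back toward the host. How exactly r is consumed after the break depends on code not shown in this excerpt.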