MSR_KVM_ASYNC_PF_EN: 0x4b564d02
data: Bits 63-6 hold 64-byte aligned physical address of a
64 byte memory area which must be in guest RAM and must be
- zeroed. Bits 5-1 are reserved and should be zero. Bit 0 is 1
+ zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
when asynchronous page faults are enabled on the vcpu, 0 when
- disabled.
+ disabled. Bit 1 is 1 if asynchronous page faults can be injected
+ when vcpu is in cpl == 0.
The first 4 bytes of the 64 byte memory location will be written to
by the hypervisor at the time of asynchronous page fault (APF)
injection to indicate the reason for the fault.
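The exact definition of the shared area is not part of this excerpt; a
sketch consistent with the layout documented above and with the per-cpu
apf_reason used by the guest code below (field names other than enabled
are assumptions) could look like:

#include <linux/types.h>

/* Sketch: 64 byte per-vcpu area handed to the hypervisor via
 * MSR_KVM_ASYNC_PF_EN.  Only the first 4 bytes (the APF reason) are
 * written by the hypervisor; the padding keeps the shared part exactly
 * 64 bytes.  The trailing 'enabled' flag is guest-only bookkeeping,
 * matching the apf_reason.enabled use in the enable path further down.
 */
struct kvm_vcpu_pv_apf_data {
	__u32 reason;	/* written by the hypervisor on APF injection */
	__u8 pad[60];	/* pad the hypervisor-visible part to 64 bytes */
	__u32 enabled;	/* guest-side: async PF active on this cpu? */
};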
struct gfn_to_hva_cache data;
u64 msr_val;
u32 id;
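+ /* true unless the guest set KVM_ASYNC_PF_SEND_ALWAYS (see the MSR handler below) */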
+ bool send_user_only;
} apf;
};
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
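+/* bit 1: async page faults may be injected even while the vcpu is at cpl == 0 */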
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
u64 pa = __pa(&__get_cpu_var(apf_reason));
+#ifdef CONFIG_PREEMPT
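+ /* With CONFIG_PREEMPT the kernel can schedule another task while an
+  * async PF is outstanding, so receiving them at cpl == 0 is safe too. */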
+ pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
__get_cpu_var(apf_reason).enabled = 1;
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
{
gpa_t gpa = data & ~0x3f;
- /* Bits 1:5 are resrved, Should be zero */
- if (data & 0x3e)
+ /* Bits 2:5 are reserved, should be zero */
+ if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
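/* Cache the gpa->hva translation of the 64 byte area so the hypervisor
   can write the APF reason cheaply at injection time. */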
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
+ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
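The disable path is not shown in this excerpt; per the bit layout
documented above, clearing bit 0 turns delivery off again. A minimal
guest-side sketch mirroring the enable code above (the helper name is
hypothetical):

/* Sketch only: disable async page faults on this cpu by writing 0
 * (no enable bit, no flags, no address) to MSR_KVM_ASYNC_PF_EN.
 */
static void kvm_apf_disable_this_cpu(void)	/* hypothetical helper */
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;
}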