#include <asm-generic/bitops/le.h>
#include "coalesced_mmio.h"
+#include "async_pf.h"
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
init_waitqueue_head(&vcpu->wq);
+ kvm_async_pf_vcpu_init(vcpu);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
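+/*
+ * Note: dirty_bitmap may point into either half of the double-sized
+ * buffer (see kvm_create_dirty_bitmap() below), so the allocation is
+ * always freed through dirty_bitmap_head, which points at its start.
+ */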
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ if (!memslot->dirty_bitmap)
+ return;
+
+ if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
+ vfree(memslot->dirty_bitmap_head);
+ else
+ kfree(memslot->dirty_bitmap_head);
+
+ memslot->dirty_bitmap = NULL;
+ memslot->dirty_bitmap_head = NULL;
+}
+
/*
* Free any memory in @free but not in @dont.
*/
vfree(free->rmap);
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
- vfree(free->dirty_bitmap);
+ kvm_destroy_dirty_bitmap(free);
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
}
free->npages = 0;
- free->dirty_bitmap = NULL;
free->rmap = NULL;
}
return 0;
}
+/*
+ * Allocation size is twice as large as the actual dirty bitmap size.
+ * This makes it possible to do double buffering: see x86's
+ * kvm_vm_ioctl_get_dirty_log() (a sketch of the flip follows this function).
+ */
+static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+
+ if (dirty_bytes > PAGE_SIZE)
+ memslot->dirty_bitmap = vzalloc(dirty_bytes);
+ else
+ memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
+
+ if (!memslot->dirty_bitmap)
+ return -ENOMEM;
+
+ memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+ return 0;
+}
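+
+/*
+ * A rough sketch of the flip referred to above; the real logic lives in
+ * x86's kvm_vm_ioctl_get_dirty_log() and runs under slots_lock:
+ *
+ *	unsigned long n = kvm_dirty_bitmap_bytes(memslot);
+ *	unsigned long *other_half = memslot->dirty_bitmap_head;
+ *
+ *	if (memslot->dirty_bitmap == other_half)
+ *		other_half += n / sizeof(long);
+ *	memset(other_half, 0, n);
+ *	memslot->dirty_bitmap = other_half;
+ *
+ * Vcpus then dirty the fresh half while the old one is copied out to
+ * userspace.
+ */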
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
-
- new.dirty_bitmap = vmalloc(dirty_bytes);
- if (!new.dirty_bitmap)
+ if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
- memset(new.dirty_bitmap, 0, dirty_bytes);
/* destroy any largepage mappings for dirty tracking */
if (old.npages)
flush_shadow = 1;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
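+ /* invalidate cached gfn->hva translations (see kvm_write_guest_cached() below) */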
+ slots->generation++;
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
old_memslots = kvm->memslots;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
+ slots->generation++;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
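+/*
+ * Takes an explicit slots pointer so callers that already hold a
+ * kvm_memslots snapshot (e.g. kvm_gfn_to_hva_cache_init() below) can
+ * do the lookup against that snapshot, keeping the result consistent
+ * with the snapshot's generation.
+ */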
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+ gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
}
return NULL;
}
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
return memslot - slots->memslots;
}
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{
- struct kvm_memory_slot *slot;
-
- slot = gfn_to_memslot(kvm, gfn);
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
- return gfn_to_hva_many(kvm, gfn, NULL);
+ return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
+static pfn_t get_fault_pfn(void)
+{
+ get_page(fault_page);
+ return fault_pfn;
+}
+
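+/*
+ * @atomic:	never sleep; on failure return the fault page at once.
+ * @async:	if non-NULL, the caller can tolerate an async fault: it is
+ *		set to true when the page is absent but could be faulted in
+ *		by a worker (see gfn_to_pfn_async() below).
+ * @write_fault: the fault being serviced is a write fault.
+ * @writable:	if non-NULL, set to whether the page was mapped writable;
+ *		read faults (write_fault == false) must pass it.
+ */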
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
+ bool *async, bool write_fault, bool *writable)
{
struct page *page[1];
- int npages;
+ int npages = 0;
pfn_t pfn;
- if (atomic)
+ /* we can do it either atomically or asynchronously, not both */
+ BUG_ON(atomic && async);
+
+ BUG_ON(!write_fault && !writable);
+
+ if (writable)
+ *writable = true;
+
+ if (atomic || async)
npages = __get_user_pages_fast(addr, 1, 1, page);
- else {
+
+ if (unlikely(npages != 1) && !atomic) {
might_sleep();
- npages = get_user_pages_fast(addr, 1, 1, page);
+
+ if (writable)
+ *writable = write_fault;
+
+ npages = get_user_pages_fast(addr, 1, write_fault, page);
+
+ /* map read fault as writable if possible */
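+ /* (a writable host mapping lets callers treat the page as writable
+ * up front, sparing a second fault if the guest later writes) */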
+ if (unlikely(!write_fault) && npages == 1) {
+ struct page *wpage[1];
+
+ npages = __get_user_pages_fast(addr, 1, 1, wpage);
+ if (npages == 1) {
+ *writable = true;
+ put_page(page[0]);
+ page[0] = wpage[0];
+ }
+ npages = 1;
+ }
}
if (unlikely(npages != 1)) {
struct vm_area_struct *vma;
if (atomic)
- goto return_fault_page;
+ return get_fault_pfn();
down_read(&current->mm->mmap_sem);
if (is_hwpoison_address(addr)) {
return page_to_pfn(hwpoison_page);
}
- vma = find_vma(current->mm, addr);
-
- if (vma == NULL || addr < vma->vm_start ||
- !(vma->vm_flags & VM_PFNMAP)) {
- up_read(&current->mm->mmap_sem);
-return_fault_page:
- get_page(fault_page);
- return page_to_pfn(fault_page);
+ vma = find_vma_intersection(current->mm, addr, addr+1);
+
+ if (vma == NULL)
+ pfn = get_fault_pfn();
+ else if ((vma->vm_flags & VM_PFNMAP)) {
+ pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ BUG_ON(!kvm_is_mmio_pfn(pfn));
+ } else {
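+ /*
+ * The async pf worker brings the page in with a write access
+ * (get_user_pages with write == 1 in async_pf.c), so only
+ * writable vmas can be handled asynchronously.
+ */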
+ if (async && (vma->vm_flags & VM_WRITE))
+ *async = true;
+ pfn = get_fault_pfn();
}
-
- pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
up_read(&current->mm->mmap_sem);
- BUG_ON(!kvm_is_mmio_pfn(pfn));
} else
pfn = page_to_pfn(page[0]);
pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
- return hva_to_pfn(kvm, addr, true);
+ return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+ bool write_fault, bool *writable)
{
unsigned long addr;
+ if (async)
+ *async = false;
+
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr)) {
get_page(bad_page);
return page_to_pfn(bad_page);
}
- return hva_to_pfn(kvm, addr, atomic);
+ return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, true);
+ return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+ bool write_fault, bool *writable)
+{
+ return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, false);
+ return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+ bool *writable)
+{
+ return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn)
{
unsigned long addr = gfn_to_hva_memslot(slot, gfn);
- return hva_to_pfn(kvm, addr, false);
+ return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
}
int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
unsigned long addr;
gfn_t entry;
- addr = gfn_to_hva_many(kvm, gfn, &entry);
+ addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
return 0;
}
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int offset = offset_in_page(gpa);
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+
+ ghc->gpa = gpa;
+ ghc->generation = slots->generation;
+ ghc->memslot = __gfn_to_memslot(slots, gfn);
+ ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+ if (!kvm_is_error_hva(ghc->hva))
+ ghc->hva += offset;
+ else
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
+ if (slots->generation != ghc->generation)
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+
+ r = copy_to_user((void __user *)ghc->hva, data, len);
+ if (r)
+ return -EFAULT;
+ mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
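+
+/*
+ * Hypothetical usage sketch: initialize the cache once for a fixed
+ * guest address, then write through it on the hot path. The generation
+ * check above revalidates the cached hva whenever the memslot layout
+ * changes:
+ *
+ *	struct gfn_to_hva_cache ghc;
+ *	u64 val = 0;
+ *
+ *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
+ *		return -EFAULT;
+ *	...
+ *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
+ */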
+
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
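+/*
+ * Variant of mark_page_dirty() for callers that already hold the
+ * memslot, e.g. kvm_write_guest_cached() above, sparing the lookup.
+ */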
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ gfn_t gfn)
{
- struct kvm_memory_slot *memslot;
-
- memslot = gfn_to_memslot(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
}
}
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *memslot;
+
+ memslot = gfn_to_memslot(kvm, gfn);
+ mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
*/
if (arg)
goto out;
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
+ trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
case KVM_GET_REGS: {
struct kvm_regs *kvm_regs;
goto out_free_5;
}
+ r = kvm_async_pf_init();
+ if (r)
+ goto out_free;
+
kvm_chardev_ops.owner = module;
kvm_vm_fops.owner = module;
kvm_vcpu_fops.owner = module;
r = misc_register(&kvm_dev);
if (r) {
printk(KERN_ERR "kvm: misc device register failed\n");
- goto out_free;
+ goto out_unreg;
}
kvm_preempt_ops.sched_in = kvm_sched_in;
return 0;
+out_unreg:
+ kvm_async_pf_deinit();
out_free:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
kvm_exit_debug();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
+ kvm_async_pf_deinit();
sysdev_unregister(&kvm_sysdev);
sysdev_class_unregister(&kvm_sysdev_class);
unregister_reboot_notifier(&kvm_reboot_notifier);