return kvm;
}
-static void kvm_free_userspace_physmem(struct kvm_memory_slot *free)
-{
- int i;
-
- for (i = 0; i < free->npages; ++i) {
- if (free->phys_mem[i]) {
- if (!PageReserved(free->phys_mem[i]))
- SetPageDirty(free->phys_mem[i]);
- page_cache_release(free->phys_mem[i]);
- }
- }
-}
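The bulk release removed above is possible because user memory is no longer pinned for the slot's whole lifetime; the patch moves to a pin-on-access, release-when-done model. A minimal sketch of that model, using only calls that appear elsewhere in the patch (the helper itself and uaddr are hypothetical):

static int touch_user_page(unsigned long uaddr)
{
	struct page *page;
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, uaddr, 1, 1, 1,
				&page, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages != 1)
		return -EFAULT;
	/* ... read or write the page here ... */
	if (!PageReserved(page))
		SetPageDirty(page);	/* the page may have been written */
	put_page(page);			/* drop the get_user_pages() reference */
	return 0;
}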
-
static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
{
int i;
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
if (!dont || free->phys_mem != dont->phys_mem)
if (free->phys_mem) {
- if (free->user_alloc)
- kvm_free_userspace_physmem(free);
- else
+ if (!free->user_alloc)
kvm_free_kernel_physmem(free);
vfree(free->phys_mem);
}
for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
if (vcpu->pio.guest_pages[i]) {
- __free_page(vcpu->pio.guest_pages[i]);
+ kvm_release_page(vcpu->pio.guest_pages[i]);
vcpu->pio.guest_pages[i] = NULL;
}
}
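The __free_page() to kvm_release_page() switch above is an ownership change, not a cosmetic one: pio guest pages now come from gfn_to_page() and are only referenced, never owned, so freeing them outright would corrupt pages belonging to the user process. A hedged contrast, with kvm and gfn assumed in scope:

	struct page *owned, *borrowed;

	owned = alloc_page(GFP_HIGHUSER);	/* kernel allocates and owns */
	__free_page(owned);			/* the owner may free outright */

	borrowed = gfn_to_page(kvm, gfn);	/* only a reference is taken */
	kvm_release_page(borrowed);		/* so only the reference is dropped */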
memset(new.phys_mem, 0, npages * sizeof(struct page *));
memset(new.rmap, 0, npages * sizeof(*new.rmap));
if (user_alloc) {
- unsigned long pages_num;
-
new.user_alloc = 1;
- down_read(&current->mm->mmap_sem);
-
- pages_num = get_user_pages(current, current->mm,
- mem->userspace_addr,
- npages, 1, 1, new.phys_mem,
- NULL);
-
- up_read(&current->mm->mmap_sem);
- if (pages_num != npages)
- goto out_unlock;
+ new.userspace_addr = mem->userspace_addr;
} else {
for (i = 0; i < npages; ++i) {
new.phys_mem[i] = alloc_page(GFP_HIGHUSER
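For user-allocated slots, the ioctl path above now records just the userspace address instead of pinning npages up front; the gfn-to-hva translation is deferred to fault time. The arithmetic, as gfn_to_page() below performs it (slot and gfn as in the patch):

	/* host virtual address of a guest frame in a user-allocated slot */
	unsigned long hva = slot->userspace_addr
			    + (gfn - slot->base_gfn) * PAGE_SIZE;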
gfn = unalias_gfn(kvm, gfn);
slot = __gfn_to_memslot(kvm, gfn);
- if (!slot)
+ if (!slot) {
+ get_page(bad_page);
return bad_page;
+ }
+ if (slot->user_alloc) {
+ struct page *page[1];
+ int npages;
+
+ down_read(&current->mm->mmap_sem);
+ npages = get_user_pages(current, current->mm,
+ slot->userspace_addr
+ + (gfn - slot->base_gfn) * PAGE_SIZE, 1,
+ 1, 1, page, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (npages != 1) {
+ get_page(bad_page);
+ return bad_page;
+ }
+ return page[0];
+ }
+ get_page(slot->phys_mem[gfn - slot->base_gfn]);
return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);
+void kvm_release_page(struct page *page)
+{
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page);
+
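These two exports define the new contract: gfn_to_page() always returns with a reference held, even on failure (bad_page is referenced too, which keeps error paths uniform), and every caller must balance it with kvm_release_page(). The release conservatively dirties the page because the guest may have written it through the shadow MMU without the core VM noticing. A minimal caller sketch, assuming a hypothetical use_gfn() helper:

static int use_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page = gfn_to_page(kvm, gfn);	/* reference taken */

	if (is_error_page(page)) {
		kvm_release_page(page);	/* the bad_page ref must go too */
		return -EFAULT;
	}
	/* ... use the page ... */
	kvm_release_page(page);	/* SetPageDirty (if !reserved) + put_page */
	return 0;
}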
static int next_segment(unsigned long len, int offset)
{
if (len > PAGE_SIZE - offset)
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (is_error_page(page))
+ if (is_error_page(page)) {
+ kvm_release_page(page);
return -EFAULT;
+ }
page_virt = kmap_atomic(page, KM_USER0);
memcpy(data, page_virt + offset, len);
kunmap_atomic(page_virt, KM_USER0);
+ kvm_release_page(page);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (is_error_page(page))
+ if (is_error_page(page)) {
+ kvm_release_page(page);
return -EFAULT;
+ }
page_virt = kmap_atomic(page, KM_USER0);
memcpy(page_virt + offset, data, len);
kunmap_atomic(page_virt, KM_USER0);
mark_page_dirty(kvm, gfn);
+ kvm_release_page(page);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
struct page *page;
page = gfn_to_page(kvm, gfn);
- if (is_error_page(page))
+ if (is_error_page(page)) {
+ kvm_release_page(page);
return -EFAULT;
+ }
page_virt = kmap_atomic(page, KM_USER0);
memset(page_virt + offset, 0, len);
kunmap_atomic(page_virt, KM_USER0);
+ kvm_release_page(page);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
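kvm_read_guest_page(), kvm_write_guest_page() and kvm_clear_guest_page() now share a single shape: reference, map, operate, unmap, release. A hedged restatement of that shared pattern (the helper and its op callback are hypothetical, not part of the patch):

static int guest_page_op(struct kvm *kvm, gfn_t gfn, int offset, int len,
			 void (*op)(void *virt, int offset, int len))
{
	struct page *page = gfn_to_page(kvm, gfn);
	void *page_virt;

	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);
	op(page_virt, offset, len);	/* the memcpy or memset variant */
	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	return 0;
}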
for (i = 0; i < nr_pages; ++i) {
mutex_lock(&vcpu->kvm->lock);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
- if (page)
- get_page(page);
vcpu->pio.guest_pages[i] = page;
mutex_unlock(&vcpu->kvm->lock);
if (!page) {
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
page = gfn_to_page(kvm, pgoff);
- if (is_error_page(page))
+ if (is_error_page(page)) {
+ kvm_release_page(page);
return NOPAGE_SIGBUS;
- get_page(page);
+ }
if (type != NULL)
*type = VM_FAULT_MINOR;
if (!is_rmap_pte(*spte))
return;
page = page_header(__pa(spte));
+ kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
+ PAGE_SHIFT));
rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
if (!*rmapp) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
PT_USER_MASK;
if (!was_rmapped)
rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+ else
+ kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
return 0;
}
1, 3, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
+ kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
return -ENOMEM;
}
paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
- if (is_error_hpa(paddr))
+ if (is_error_hpa(paddr)) {
+ kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
return 1;
+ }
return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}
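The release on the is_error_hpa() path looks odd but is deliberate: gpa_to_hpa() is built on gfn_to_page(), so even an error hpa encodes the frame of a referenced bad_page, and that reference must still be dropped. Sketched, with the error-bit encoding treated as opaque:

	/* hedged sketch of the failure flow: gfn_to_page() on a bad gfn
	 * does get_page(bad_page) and returns bad_page, and gpa_to_hpa()
	 * tags that result with an error bit, so the caller must put it */
	if (is_error_hpa(paddr))
		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					     >> PAGE_SHIFT));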
} else {
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+ struct page *page;

if (is_shadow_present_pte(ent)
&& (ent & PT64_BASE_ADDR_MASK) != hpa)
&& !is_error_hpa(hpa))
printk(KERN_ERR "audit: (%s) notrap shadow,"
" valid guest gva %lx\n", audit_msg, va);
+ page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT);
+ kvm_release_page(page);
}
}
struct kvm_vcpu *vcpu, gva_t addr,
int write_fault, int user_fault, int fetch_fault)
{
- struct page *page;
+ struct page *page = NULL;
pt_element_t *table;
pt_element_t pte;
gfn_t table_gfn;
walker->inherited_ar &= pte;
--walker->level;
+ kvm_release_page(page);
}
if (write_fault && !is_dirty_pte(pte)) {
kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
}
+ kvm_release_page(page);
walker->pte = pte;
pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
return 1;
walker->error_code |= PFERR_USER_MASK;
if (fetch_fault)
walker->error_code |= PFERR_FETCH_MASK;
+ if (page)
+ kvm_release_page(page);
return 0;
}
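The walker changes all follow one rule: each page-table level is reached through gfn_to_page(), so the previous level's reference is dropped as the walk descends, and whichever page is still held must be released on both the success and the fault exit (hence page starting as NULL and the if (page) guard). A condensed, hypothetical restatement (the real walk_addr() tracks far more state; index and table_gfn updates are elided):

	struct page *page = NULL;	/* nothing mapped yet */
	pt_element_t *table, pte;

	for (;;) {
		page = gfn_to_page(vcpu->kvm, table_gfn); /* ref this level */
		table = kmap_atomic(page, KM_USER0);
		pte = table[index];
		kunmap_atomic(table, KM_USER0);
		if (!is_present_pte(pte))
			goto err;	/* fault: one reference still held */
		if (walker->level == PT_PAGE_TABLE_LEVEL)
			break;
		--walker->level;
		kvm_release_page(page);	/* descending: drop this level */
	}
	kvm_release_page(page);		/* success: drop the leaf reference */
	return 1;
err:
	if (page)	/* the real code can fault before any level is mapped */
		kvm_release_page(page);
	return 0;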
if (is_error_hpa(paddr)) {
set_shadow_pte(shadow_pte,
shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+ kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
return;
}
pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
set_shadow_pte(shadow_pte, spte);
page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
- if (!was_rmapped)
+ if (!was_rmapped) {
rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
>> PAGE_SHIFT);
+ if (!is_rmap_pte(*shadow_pte)) {
+ struct page *page;
+
+ page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT);
+ kvm_release_page(page);
+ }
+ } else
+ kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
if (!ptwrite || !*ptwrite)
vcpu->last_pte_updated = shadow_pte;
}
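mmu_set_spte()'s new tail settles the reference that gpa_to_hpa() took for this mapping: a freshly created rmap entry inherits it (rmap_remove() drops it later, as in the hunk above); if the pte was already rmapped, or rmap_add() did not actually leave the pte rmappable, the reference is surplus and is dropped at once. The rule, hypothetically factored out:

/* Hypothetical restatement, not a function in the patch: exactly one of
 * the rmap or this caller ends up owning the gfn_to_page() reference. */
static void settle_ref(u64 *shadow_pte, hpa_t paddr, int was_rmapped)
{
	struct page *page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					>> PAGE_SHIFT);

	if (!was_rmapped && is_rmap_pte(*shadow_pte))
		return;			/* the rmap keeps the reference */
	kvm_release_page(page);		/* otherwise drop it now */
}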
{
int i;
pt_element_t *gpt;
+ struct page *page;
if (sp->role.metaphysical || PTTYPE == 32) {
nonpaging_prefetch_page(vcpu, sp);
return;
}
- gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
+ page = gfn_to_page(vcpu->kvm, sp->gfn);
+ gpt = kmap_atomic(page, KM_USER0);
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
if (is_present_pte(gpt[i]))
sp->spt[i] = shadow_trap_nonpresent_pte;
else
sp->spt[i] = shadow_notrap_nonpresent_pte;
kunmap_atomic(gpt, KM_USER0);
+ kvm_release_page(page);
}
#undef pt_element_t