#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
- #define shadow_walker shadow_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
- #define shadow_walker shadow_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
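
For orientation: paging_tmpl.h is a template, included once per guest paging mode, and FNAME() pastes the mode into every function name, so FNAME(invlpg) becomes paging64_invlpg or paging32_invlpg. The instantiation in mmu.c looks like this (sketch):

	#define PTTYPE 64
	#include "paging_tmpl.h"	/* emits the paging64_* family */
	#undef PTTYPE

	#define PTTYPE 32
	#include "paging_tmpl.h"	/* emits the paging32_* family */
	#undef PTTYPE

The #undef block at the end of this file is what makes the second inclusion possible.
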
u32 error_code;
};
-struct shadow_walker {
- struct kvm_shadow_walk walker;
- struct guest_walker *guest_walker;
- int user_fault;
- int write_fault;
- int largepage;
- int *ptwrite;
- pfn_t pfn;
- u64 *sptep;
- gpa_t pte_gpa;
-};
-
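
struct shadow_walker existed only to smuggle per-walk state through the walk_shadow() callback; with the for_each_shadow_entry() iterator the walk is an ordinary loop, the state becomes plain locals, and the struct (plus its 64/32 name aliases above) can go. The iterator, as defined in mmu.c of this era (abridged, may differ in detail):

	/* the shadow walk as a plain for loop; the iterator exposes the
	 * current level and sptep, so no callback or state struct is needed */
	#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
		for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
		     shadow_walk_okay(&(_walker));			\
		     shadow_walk_next(&(_walker)))

Each iteration publishes the current level and spte pointer through the iterator, which is exactly how the rewritten invlpg() below consumes it.
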
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
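
A worked example of the mask-and-shift, with made-up values and 4 KiB pages (PAGE_SHIFT == 12):

	/* 0x067 = present | writable | user | accessed | dirty */
	gfn_t gfn = gpte_to_gfn(0x12345067);
	/* (0x12345067 & PT_BASE_ADDR_MASK) == 0x12345000; >> 12 == gfn 0x12345 */
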
unsigned access = gw->pt_access;
struct kvm_mmu_page *shadow_page;
u64 spte, *sptep;
- int metaphysical;
+ int direct;
gfn_t table_gfn;
int r;
int level;
continue;
if (is_large_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
kvm_flush_remote_tlbs(vcpu->kvm);
- rmap_remove(vcpu->kvm, sptep);
}
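
The reordering in this hunk is a correctness fix, not a cleanup: rmap_remove() reads the spte it is unlinking to recover the backing pfn whose reverse-mapping chain must be updated, so it has to run while *sptep still holds the live large-page translation. In the old order it inspected a slot already overwritten with shadow_trap_nonpresent_pte. The resulting zap sequence, distilled:

	rmap_remove(vcpu->kvm, sptep);		/* needs *sptep intact */
	set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
	kvm_flush_remote_tlbs(vcpu->kvm);	/* drop stale translations */
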
if (level == PT_DIRECTORY_LEVEL
&& gw->level == PT_DIRECTORY_LEVEL) {
- metaphysical = 1;
+ direct = 1;
if (!is_dirty_pte(gw->ptes[level - 1]))
access &= ~ACC_WRITE_MASK;
table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
} else {
- metaphysical = 0;
+ direct = 0;
table_gfn = gw->table_gfn[level - 2];
}
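
A shadow page is "direct" when it does not shadow a guest page table; the case decided here is a guest large page (gw->level == PT_DIRECTORY_LEVEL) being split into 4 KiB sptes, where the next-level table is synthesized by KVM and its gfn is taken from the large gpte itself. The metaphysical to direct change is purely a rename of that concept. The bit lives in the page's role word, roughly like this (field layout illustrative, not authoritative):

	union kvm_mmu_page_role {
		unsigned word;
		struct {
			unsigned level:4;
			unsigned direct:1;	/* was 'metaphysical' */
			unsigned access:3;
			/* quadrant, invalid, ... */
		};
	};
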
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
- metaphysical, access, sptep);
- if (!metaphysical) {
+ direct, access, sptep);
+ if (!direct) {
r = kvm_read_guest_atomic(vcpu->kvm,
gw->pte_gpa[level - 2],
&curr_pte, sizeof(curr_pte));
return 0;
}
-static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
- struct kvm_vcpu *vcpu, u64 addr,
- u64 *sptep, int level)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
- struct shadow_walker *sw =
- container_of(_sw, struct shadow_walker, walker);
+ struct kvm_shadow_walk_iterator iterator;
+ pt_element_t gpte;
+ gpa_t pte_gpa = -1;
+ int level;
+ u64 *sptep;
+ int need_flush = 0;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
- /* FIXME: properly handle invlpg on large guest pages */
- if (level == PT_PAGE_TABLE_LEVEL ||
- ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
- struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ for_each_shadow_entry(vcpu, gva, iterator) {
+ level = iterator.level;
+ sptep = iterator.sptep;
- sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
- sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+ /* FIXME: properly handle invlpg on large guest pages */
+ if (level == PT_PAGE_TABLE_LEVEL ||
+ ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
- if (is_shadow_present_pte(*sptep)) {
- rmap_remove(vcpu->kvm, sptep);
- if (is_large_pte(*sptep))
- --vcpu->kvm->stat.lpages;
+ pte_gpa = (sp->gfn << PAGE_SHIFT);
+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+ if (is_shadow_present_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
+ if (is_large_pte(*sptep))
+ --vcpu->kvm->stat.lpages;
+ need_flush = 1;
+ }
+ set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+ break;
}
- set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
- return 1;
- }
- if (!is_shadow_present_pte(*sptep))
- return 1;
- return 0;
-}
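
The removed function implemented the old walker protocol: walk_shadow() invoked ->entry() at every level, and a nonzero return value terminated the walk, which is why both stop cases above return 1. The protocol struct looked roughly like this (sketch from the same era):

	struct kvm_shadow_walk {
		int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
			     u64 addr, u64 *spte, int level);
	};
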
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
- pt_element_t gpte;
- struct shadow_walker walker = {
- .walker = { .entry = FNAME(shadow_invlpg_entry), },
- .pte_gpa = -1,
- };
+ if (!is_shadow_present_pte(*sptep))
+ break;
+ }
- spin_lock(&vcpu->kvm->mmu_lock);
- walk_shadow(&walker.walker, vcpu, gva);
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
spin_unlock(&vcpu->kvm->mmu_lock);
- if (walker.pte_gpa == -1)
+
+ if (pte_gpa == -1)
return;
- if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
return;
if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
if (mmu_topup_memory_caches(vcpu))
return;
- kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+ kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
sizeof(pt_element_t), 0);
}
}
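
The rewritten invlpg() thus does, under mmu_lock: walk toward gva; at the leaf (or at a large spte standing in for one) record the guest pte's location, unlink and zap the entry, and note that a flush is owed; bail out early if an intermediate entry is not present. One remote TLB flush is issued after the loop rather than inside the per-entry code. After unlocking, the guest pte is re-read and, if it still looks live (present and accessed), fed to kvm_mmu_pte_write() so the mapping is rebuilt eagerly instead of waiting for the next fault. For reference, the function is reached through the mmu callback table when the guest executes INVLPG; the caller in mmu.c of this era is roughly:

	void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
	{
		vcpu->arch.mmu.invlpg(vcpu, gva);	/* paging64_invlpg, ... */
		kvm_mmu_flush_tlb(vcpu);
		++vcpu->stat.invlpg;
	}
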
pt_element_t pt[256 / sizeof(pt_element_t)];
gpa_t pte_gpa;
- if (sp->role.metaphysical
+ if (sp->role.direct
|| (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
nonpaging_prefetch_page(vcpu, sp);
return;
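
A direct page has no guest page table behind it, so there are no guest ptes to prefetch; likewise a 32-bit guest's non-leaf tables hold 1024 4-byte entries, so each 512-entry shadow page covers only a quadrant of one and the bulk copy this function performs does not apply. Both cases fall back to the nonpaging helper, which simply fills the page with trap entries (mmu.c, sketch):

	static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
					    struct kvm_mmu_page *sp)
	{
		int i;

		/* nothing to mirror: every slot becomes a trap pte and
		 * faults will populate the page on demand */
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			sp->spt[i] = shadow_trap_nonpresent_pte;
	}
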
#undef pt_element_t
#undef guest_walker
-#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX