KVM: MMU: limit rmap chain length
author		Marcelo Tosatti <mtosatti@redhat.com>	Tue, 1 Sep 2009 15:15:13 +0000 (12:15 -0300)
committer	Greg Kroah-Hartman <gregkh@suse.de>	Wed, 9 Sep 2009 03:33:37 +0000 (20:33 -0700)
(cherry picked from commit 53a27b39ff4d2492f84b1fdc2f0047175f0b0b93)

Otherwise the host can spend too long traversing an rmap chain, which
happens under a spinlock.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
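
The chain being bounded here is a linked list of struct kvm_rmap_desc nodes hanging off a gfn's rmap slot; before it can store a new spte, rmap_add() has to walk to the tail of that list, and in the kernel the walk runs under the mmu_lock spinlock, so an unbounded chain means unbounded time under the lock. Below is a minimal standalone sketch of that shape and of the counting walk the patch adds; it is not the kernel source, and RMAP_EXT is assumed to be 4, its value in mmu.c of this era.

#define RMAP_EXT 4      /* assumed: sptes per descriptor in this era's mmu.c */

struct kvm_rmap_desc {
        unsigned long *shadow_ptes[RMAP_EXT];   /* up to RMAP_EXT sptes per node */
        struct kvm_rmap_desc *more;             /* next node, NULL at the tail */
};

/* Count full descriptors the way the patched rmap_add() does: one hop
 * and RMAP_EXT entries per fully used node. The cost is linear in the
 * chain length, which is exactly what the patch caps. */
static int chain_count(struct kvm_rmap_desc *desc)
{
        int count = 0;

        while (desc->shadow_ptes[RMAP_EXT - 1] && desc->more) {
                desc = desc->more;
                count += RMAP_EXT;
        }
        return count;
}
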
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6d66a6ff54432e338a93990ab4cecff6217dfbbe..d7ce26babbbeb06a2ece2e04b0a25c2c6714a74f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -490,16 +490,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  *
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
+ *
+ * Returns the number of rmap entries before the spte was added or zero if
+ * the spte was not added.
+ *
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
-       int i;
+       int i, count = 0;
 
        if (!is_rmap_pte(*spte))
-               return;
+               return count;
        gfn = unalias_gfn(vcpu->kvm, gfn);
        sp = page_header(__pa(spte));
        sp->gfns[spte - sp->spt] = gfn;
@@ -516,8 +520,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-               while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+               while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
                        desc = desc->more;
+                       count += RMAP_EXT;
+               }
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
@@ -526,6 +532,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
                        ;
                desc->shadow_ptes[i] = spte;
        }
+       return count;
 }
 
 static void rmap_desc_remove_entry(unsigned long *rmapp,
@@ -755,6 +762,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
        return young;
 }
 
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+{
+       unsigned long *rmapp;
+
+       gfn = unalias_gfn(vcpu->kvm, gfn);
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+       kvm_unmap_rmapp(vcpu->kvm, rmapp);
+       kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
@@ -1771,6 +1791,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
+       int rmap_count;
 
        pgprintk("%s: spte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
@@ -1812,9 +1833,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
        page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
        if (!was_rmapped) {
-               rmap_add(vcpu, shadow_pte, gfn, largepage);
+               rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
                if (!is_rmap_pte(*shadow_pte))
                        kvm_release_pfn_clean(pfn);
+               if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+                       rmap_recycle(vcpu, gfn, largepage);
        } else {
                if (was_writeble)
                        kvm_release_pfn_dirty(pfn);
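
Taken together: the count returned by rmap_add() advances in RMAP_EXT-sized steps, so the new check fires once a single gfn has accumulated on the order of RMAP_RECYCLE_THRESHOLD (1000) reverse mappings; rmap_recycle() then drops every spte for that gfn through kvm_unmap_rmapp() and flushes remote TLBs, letting the mappings fault back in and the chain rebuild on demand. A toy userspace model of when the threshold trips, again assuming RMAP_EXT is 4 and not claiming to be the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define RMAP_EXT 4
#define RMAP_RECYCLE_THRESHOLD 1000

struct desc {
        int used;               /* sptes stored in this node, <= RMAP_EXT */
        struct desc *more;      /* next node in the chain */
};

/* Mirror the patched rmap_add(): hop over full nodes, counting RMAP_EXT
 * per hop, then store in the first free slot; return the count observed
 * before the add. */
static int add_spte(struct desc *d)
{
        int count = 0;

        while (d->used == RMAP_EXT && d->more) {
                d = d->more;
                count += RMAP_EXT;
        }
        if (d->used == RMAP_EXT) {
                if (!(d->more = calloc(1, sizeof(*d))))
                        abort();        /* toy model: no recovery */
                d = d->more;
        }
        d->used++;
        return count;
}

int main(void)
{
        struct desc *head = calloc(1, sizeof(*head));
        int i;

        if (!head)
                return 1;
        for (i = 1; add_spte(head) <= RMAP_RECYCLE_THRESHOLD; i++)
                ;
        /* With these numbers this prints spte #1006: the chain is torn
         * down once it holds roughly a thousand entries. */
        printf("rmap_recycle() would fire on spte #%d\n", i);
        return 0;
}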