KVM: s390: backup the currently enabled gmap when scheduled out
author		David Hildenbrand <dahi@linux.vnet.ibm.com>	Wed, 11 Mar 2015 15:47:33 +0000 (16:47 +0100)
committer	Christian Borntraeger <borntraeger@de.ibm.com>	Mon, 20 Jun 2016 07:55:24 +0000 (09:55 +0200)
Nested virtualization will have to enable its own gmaps. The current
code re-enables vcpu->arch.gmap whenever the vcpu is scheduled out and
back in, which would then leave the wrong gmap enabled.

This patch re-enables the last enabled gmap instead, avoiding the need
to touch vcpu->arch.gmap when enabling a different gmap.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
arch/s390/include/asm/gmap.h
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/kvm-s390.c
arch/s390/mm/gmap.c

diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index c67fb854705ee0642560c0c9d30ba1a24549cc56..741ddba0bf11b547a791fc41a092e426c4f9ffef 100644
@@ -94,6 +94,7 @@ void gmap_put(struct gmap *gmap);
 
 void gmap_enable(struct gmap *gmap);
 void gmap_disable(struct gmap *gmap);
+struct gmap *gmap_get_enabled(void);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9eed5c18a61c390c2494fae189426661ed11b014..96bef30e2e33f4dff17b19537a7d88f810aec8da 100644
@@ -551,6 +551,8 @@ struct kvm_vcpu_arch {
        struct hrtimer    ckc_timer;
        struct kvm_s390_pgm_info pgm;
        struct gmap *gmap;
+       /* backup location for the currently enabled gmap when scheduled out */
+       struct gmap *enabled_gmap;
        struct kvm_guestdbg_info_arch guestdbg;
        unsigned long pfault_token;
        unsigned long pfault_select;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 45a8316ba1ebe6124962b756dac2d8510215398e..a890f7d207115a7b4ff7179ed2889fac67117b04 100644
@@ -1719,7 +1719,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
-       gmap_enable(vcpu->arch.gmap);
+       gmap_enable(vcpu->arch.enabled_gmap);
        atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
                __start_cpu_timer_accounting(vcpu);
@@ -1732,7 +1732,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
                __stop_cpu_timer_accounting(vcpu);
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
-       gmap_disable(vcpu->arch.gmap);
+       vcpu->arch.enabled_gmap = gmap_get_enabled();
+       gmap_disable(vcpu->arch.enabled_gmap);
 
        /* Save guest register state */
        save_fpu_regs();
@@ -1781,7 +1782,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
                sca_add_vcpu(vcpu);
        }
-
+       /* make vcpu_load load the right gmap on the first trigger */
+       vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 738d75495e5641179e1904d84ffcac753e84105b..af0ae6d7ac59fd8c5df92bfba714f3456b16c880 100644
@@ -270,6 +270,17 @@ void gmap_disable(struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
 
+/**
+ * gmap_get_enabled - get a pointer to the currently enabled gmap
+ *
+ * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
+ */
+struct gmap *gmap_get_enabled(void)
+{
+       return (struct gmap *) S390_lowcore.gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_get_enabled);
+
 /*
  * gmap_alloc_table is assumed to be called with mmap_sem held
  */