KVM: s390: don't load kvm without virtualization support
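
Summary of the changes shown below:

- sca_del_vcpu() and sca_add_vcpu() now set or clear the SCA's sda entry unconditionally rather than first comparing it against the vcpu's SIE block address, and sca_add_vcpu() takes only the vcpu, deriving the kvm pointer and vcpu id from it instead of passing them as redundant parameters.
- kvm_arch_vcpu_create() skips the sca_can_add_vcpu() capacity check for ucontrol guests, which never register their vcpus in an SCA (see kvm_arch_vcpu_postcreate() below).
- kvm_s390_init() now refuses to load the module with -ENODEV when sclp.has_sief2 reports that the SIE virtualization facility is absent, since KVM cannot run guests without it.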
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2ba5978829f6d8a81fb461b34fa1305996faf314..49d33190bd98746202bbb96c6d39ea0b35f11a3c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1266,41 +1266,36 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
                struct esca_block *sca = vcpu->kvm->arch.sca;
 
                clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
-               if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
-                       sca->cpu[vcpu->vcpu_id].sda = 0;
+               sca->cpu[vcpu->vcpu_id].sda = 0;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
 
                clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
-               if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
-                       sca->cpu[vcpu->vcpu_id].sda = 0;
+               sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
-static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
-                       unsigned int id)
+static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 {
-       read_lock(&kvm->arch.sca_lock);
-       if (kvm->arch.use_esca) {
-               struct esca_block *sca = kvm->arch.sca;
+       read_lock(&vcpu->kvm->arch.sca_lock);
+       if (vcpu->kvm->arch.use_esca) {
+               struct esca_block *sca = vcpu->kvm->arch.sca;
 
-               if (!sca->cpu[id].sda)
-                       sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+               sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
                vcpu->arch.sie_block->ecb2 |= 0x04U;
-               set_bit_inv(id, (unsigned long *) sca->mcn);
+               set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
        } else {
-               struct bsca_block *sca = kvm->arch.sca;
+               struct bsca_block *sca = vcpu->kvm->arch.sca;
 
-               if (!sca->cpu[id].sda)
-                       sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+               sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
-               set_bit_inv(id, (unsigned long *) &sca->mcn);
+               set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
        }
-       read_unlock(&kvm->arch.sca_lock);
+       read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 /* Basic SCA to Extended SCA data copy routines */
@@ -1496,7 +1491,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
-               sca_add_vcpu(vcpu, vcpu->kvm, vcpu->vcpu_id);
+               sca_add_vcpu(vcpu);
        }
 
 }
@@ -1593,7 +1588,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        struct sie_page *sie_page;
        int rc = -EINVAL;
 
-       if (!sca_can_add_vcpu(kvm, id))
+       if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
                goto out;
 
        rc = -ENOMEM;
@@ -2864,6 +2859,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 static int __init kvm_s390_init(void)
 {
+       if (!sclp.has_sief2) {
+               pr_info("SIE not available\n");
+               return -ENODEV;
+       }
+
        return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
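
For illustration only (not part of the diff): once kvm_s390_init() bails out with -ENODEV, the module never registers and /dev/kvm is never created, so userspace fails already at open() rather than at a later ioctl. Below is a minimal, hypothetical userspace probe sketching that observable effect; it uses only the standard KVM API (/dev/kvm and the KVM_GET_API_VERSION ioctl), and its messages are this sketch's own, not the kernel's.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	/* Without SIE support the kvm module fails to load with -ENODEV,
	 * so /dev/kvm does not exist and this open() fails. */
	int fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		fprintf(stderr, "KVM unavailable: %s\n", strerror(errno));
		return 1;
	}

	/* KVM_GET_API_VERSION is the canonical "is KVM alive" check. */
	printf("KVM API version: %d\n", ioctl(fd, KVM_GET_API_VERSION, 0));
	close(fd);
	return 0;
}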