/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

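/*
 * Describe a per-vcpu statistics counter for debugfs_entries[]: the offset
 * of the counter within struct kvm_vcpu plus its reporting type.
 */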
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
        0xff82fffbf4fc2000UL,
        0x005c000000000000UL,
};

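/*
 * Number of u64 words in the kvm facility mask; the BUILD_BUG_ON catches a
 * mask that has outgrown the space reserved for it.
 */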
unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_USER_SIGP:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

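/*
 * Transfer per-page dirty state from the gmap to KVM's dirty bitmap for
 * every page of the given memslot.
 */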
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

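/*
 * The capabilities below are enabled from userspace through the
 * KVM_ENABLE_CAP vm ioctl; a minimal userspace sketch (not part of this
 * file; vm_fd is assumed to be an open VM file descriptor):
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */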
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (new_limit > kvm->arch.gmap->asce_end)
                        return -E2BIG;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;

        return 0;
}

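/*
 * The guest TOD is expressed as an offset (epoch) to the host TOD clock;
 * recompute the epoch from the requested guest TOD, propagate it to every
 * vcpu and kick them out of SIE so the new value takes effect.
 */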
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *cur_vcpu;
        unsigned int vcpu_idx;
        u64 host_tod, gtod;
        int r;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
                exit_sie(cur_vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                                         sizeof(gtod_high)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 host_tod, gtod;
        int r;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        gtod = host_tod + kvm->arch.epoch;
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
                memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp_get_ibc();
        memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
               kvm_s390_fac_list_mask_size() * sizeof(u64));
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_U64);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        }
        return ret;
}

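/*
 * The attribute groups above are reached through the KVM_SET_DEVICE_ATTR,
 * KVM_GET_DEVICE_ATTR and KVM_HAS_DEVICE_ATTR vm ioctls. A minimal
 * userspace sketch (not part of this file; vm_fd is assumed to be an open
 * VM file descriptor), enabling CMMA for a VM:
 *
 *      struct kvm_device_attr attr = {
 *              .group = KVM_S390_VM_MEM_CTRL,
 *              .attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *      };
 *      ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */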
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

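/*
 * Execute PQAP with the QCI function code to retrieve the AP configuration
 * into @config: the function code goes into register 0, the buffer address
 * into register 2, and the resulting condition code is returned.
 */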
static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc;

        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "ipm %0\n"
                "srl %0,28\n"
                : "=r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(2) && test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
        get_cpu_id(cpu_id);
        cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm_s390_set_crycb_format(kvm);

        /* Disable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 0;
        kvm->arch.crypto.dea_kw = 0;

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
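        /*
         * Stagger the SCA of successively created VMs in 16-byte steps
         * within the low 2k of the page, so that the heavily used SCAs do
         * not all start at the same offset (and thus the same cache lines).
         */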
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        /*
         * The architectural maximum number of facility bits is 16 kbit,
         * which takes 2 kbyte of memory to store. Thus we need a full page
         * to hold both the active copy (arch.model.fac->sie) and the
         * current facility set (arch.model.fac->kvm). The page's address
         * has to fit in 31 bits and be word aligned.
         */
        kvm->arch.model.fac =
                (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_nofac;

        memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_U64);

        /*
         * If this KVM host does not run in an LPAR, relax the facility bits
         * of the kvm facility mask by clearing all facilities the host does
         * not offer. This allows the right CPU model to be determined from
         * the remaining facilities. Live guest migration must prohibit
         * migrating a KVM guest from an LPAR host to a non-LPAR host.
         */
        if (!MACHINE_IS_LPAR)
                for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
                        kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];

        /*
         * Apply the kvm facility mask to limit the kvm supported/tolerated
         * facility list.
         */
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac->kvm[i] = 0UL;
        }

        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_crypto;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_nogmap:
        kfree(kvm->arch.crypto.crycb);
out_crypto:
        free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_kvm_facility(vcpu->kvm, 76))
                return;

        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

        if (vcpu->kvm->arch.crypto.aes_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

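/* Release the CMMA buffer referenced by the SIE control block (cbrlo). */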
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        if (sclp_has_sigpif())
                vcpu->arch.sie_block->eca |= 0x10000000U;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
                                      ICTL_TPROT;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
        memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }
        vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
1257 void exit_sie(struct kvm_vcpu *vcpu)
1258 {
1259         atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1260         while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1261                 cpu_relax();
1262 }
1263
1264 /* Kick a guest cpu out of SIE and prevent SIE-reentry */
1265 void exit_sie_sync(struct kvm_vcpu *vcpu)
1266 {
1267         s390_vcpu_block(vcpu);
1268         exit_sie(vcpu);
1269 }
1270
1271 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1272 {
1273         int i;
1274         struct kvm *kvm = gmap->private;
1275         struct kvm_vcpu *vcpu;
1276
1277         kvm_for_each_vcpu(i, vcpu, kvm) {
1278                 /* match against both prefix pages */
1279                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1280                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1281                         kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
1282                         exit_sie_sync(vcpu);
1283                 }
1284         }
1285 }
1286
1287 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1288 {
1289         /* kvm common code refers to this, but never calls it */
1290         BUG();
1291         return 0;
1292 }
1293
1294 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1295                                            struct kvm_one_reg *reg)
1296 {
1297         int r = -EINVAL;
1298
1299         switch (reg->id) {
1300         case KVM_REG_S390_TODPR:
1301                 r = put_user(vcpu->arch.sie_block->todpr,
1302                              (u32 __user *)reg->addr);
1303                 break;
1304         case KVM_REG_S390_EPOCHDIFF:
1305                 r = put_user(vcpu->arch.sie_block->epoch,
1306                              (u64 __user *)reg->addr);
1307                 break;
1308         case KVM_REG_S390_CPU_TIMER:
1309                 r = put_user(vcpu->arch.sie_block->cputm,
1310                              (u64 __user *)reg->addr);
1311                 break;
1312         case KVM_REG_S390_CLOCK_COMP:
1313                 r = put_user(vcpu->arch.sie_block->ckc,
1314                              (u64 __user *)reg->addr);
1315                 break;
1316         case KVM_REG_S390_PFTOKEN:
1317                 r = put_user(vcpu->arch.pfault_token,
1318                              (u64 __user *)reg->addr);
1319                 break;
1320         case KVM_REG_S390_PFCOMPARE:
1321                 r = put_user(vcpu->arch.pfault_compare,
1322                              (u64 __user *)reg->addr);
1323                 break;
1324         case KVM_REG_S390_PFSELECT:
1325                 r = put_user(vcpu->arch.pfault_select,
1326                              (u64 __user *)reg->addr);
1327                 break;
1328         case KVM_REG_S390_PP:
1329                 r = put_user(vcpu->arch.sie_block->pp,
1330                              (u64 __user *)reg->addr);
1331                 break;
1332         case KVM_REG_S390_GBEA:
1333                 r = put_user(vcpu->arch.sie_block->gbea,
1334                              (u64 __user *)reg->addr);
1335                 break;
1336         default:
1337                 break;
1338         }
1339
1340         return r;
1341 }
1342
1343 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1344                                            struct kvm_one_reg *reg)
1345 {
1346         int r = -EINVAL;
1347
1348         switch (reg->id) {
1349         case KVM_REG_S390_TODPR:
1350                 r = get_user(vcpu->arch.sie_block->todpr,
1351                              (u32 __user *)reg->addr);
1352                 break;
1353         case KVM_REG_S390_EPOCHDIFF:
1354                 r = get_user(vcpu->arch.sie_block->epoch,
1355                              (u64 __user *)reg->addr);
1356                 break;
1357         case KVM_REG_S390_CPU_TIMER:
1358                 r = get_user(vcpu->arch.sie_block->cputm,
1359                              (u64 __user *)reg->addr);
1360                 break;
1361         case KVM_REG_S390_CLOCK_COMP:
1362                 r = get_user(vcpu->arch.sie_block->ckc,
1363                              (u64 __user *)reg->addr);
1364                 break;
1365         case KVM_REG_S390_PFTOKEN:
1366                 r = get_user(vcpu->arch.pfault_token,
1367                              (u64 __user *)reg->addr);
1368                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1369                         kvm_clear_async_pf_completion_queue(vcpu);
1370                 break;
1371         case KVM_REG_S390_PFCOMPARE:
1372                 r = get_user(vcpu->arch.pfault_compare,
1373                              (u64 __user *)reg->addr);
1374                 break;
1375         case KVM_REG_S390_PFSELECT:
1376                 r = get_user(vcpu->arch.pfault_select,
1377                              (u64 __user *)reg->addr);
1378                 break;
1379         case KVM_REG_S390_PP:
1380                 r = get_user(vcpu->arch.sie_block->pp,
1381                              (u64 __user *)reg->addr);
1382                 break;
1383         case KVM_REG_S390_GBEA:
1384                 r = get_user(vcpu->arch.sie_block->gbea,
1385                              (u64 __user *)reg->addr);
1386                 break;
1387         default:
1388                 break;
1389         }
1390
1391         return r;
1392 }
1393
1394 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1395 {
1396         kvm_s390_vcpu_initial_reset(vcpu);
1397         return 0;
1398 }
1399
1400 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1401 {
1402         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1403         return 0;
1404 }
1405
1406 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1407 {
1408         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1409         return 0;
1410 }
1411
1412 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1413                                   struct kvm_sregs *sregs)
1414 {
1415         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1416         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1417         restore_access_regs(vcpu->run->s.regs.acrs);
1418         return 0;
1419 }
1420
1421 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1422                                   struct kvm_sregs *sregs)
1423 {
1424         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1425         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1426         return 0;
1427 }
1428
1429 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1430 {
1431         if (test_fp_ctl(fpu->fpc))
1432                 return -EINVAL;
1433         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
1434         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1435         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1436         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1437         return 0;
1438 }
1439
1440 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1441 {
1442         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1443         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1444         return 0;
1445 }
1446
1447 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1448 {
1449         int rc = 0;
1450
1451         if (!is_vcpu_stopped(vcpu))
1452                 rc = -EBUSY;
1453         else {
1454                 vcpu->run->psw_mask = psw.mask;
1455                 vcpu->run->psw_addr = psw.addr;
1456         }
1457         return rc;
1458 }
1459
1460 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1461                                   struct kvm_translation *tr)
1462 {
1463         return -EINVAL; /* not implemented yet */
1464 }
1465
1466 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1467                               KVM_GUESTDBG_USE_HW_BP | \
1468                               KVM_GUESTDBG_ENABLE)
1469
1470 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1471                                         struct kvm_guest_debug *dbg)
1472 {
1473         int rc = 0;
1474
1475         vcpu->guest_debug = 0;
1476         kvm_s390_clear_bp_data(vcpu);
1477
1478         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1479                 return -EINVAL;
1480
1481         if (dbg->control & KVM_GUESTDBG_ENABLE) {
1482                 vcpu->guest_debug = dbg->control;
1483                 /* enforce guest PER */
1484                 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1485
1486                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1487                         rc = kvm_s390_import_bp_data(vcpu, dbg);
1488         } else {
1489                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1490                 vcpu->arch.guestdbg.last_bp = 0;
1491         }
1492
1493         if (rc) {
1494                 vcpu->guest_debug = 0;
1495                 kvm_s390_clear_bp_data(vcpu);
1496                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1497         }
1498
1499         return rc;
1500 }
1501
1502 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1503                                     struct kvm_mp_state *mp_state)
1504 {
1505         /* CHECK_STOP and LOAD are not supported yet */
1506         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1507                                        KVM_MP_STATE_OPERATING;
1508 }
1509
1510 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1511                                     struct kvm_mp_state *mp_state)
1512 {
1513         int rc = 0;
1514
1515         /* user space knows about this interface - let it control the state */
1516         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1517
1518         switch (mp_state->mp_state) {
1519         case KVM_MP_STATE_STOPPED:
1520                 kvm_s390_vcpu_stop(vcpu);
1521                 break;
1522         case KVM_MP_STATE_OPERATING:
1523                 kvm_s390_vcpu_start(vcpu);
1524                 break;
1525         case KVM_MP_STATE_LOAD:
1526         case KVM_MP_STATE_CHECK_STOP:
1527                 /* fall through - CHECK_STOP and LOAD are not supported yet */
1528         default:
1529                 rc = -ENXIO;
1530         }
1531
1532         return rc;
1533 }
1534
1535 bool kvm_s390_cmma_enabled(struct kvm *kvm)
1536 {
1537         if (!MACHINE_IS_LPAR)
1538                 return false;
1539         /* only enable for z10 and later */
1540         if (!MACHINE_HAS_EDAT1)
1541                 return false;
1542         if (!kvm->arch.use_cmma)
1543                 return false;
1544         return true;
1545 }
1546
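/*
 * Note: kvm->arch.use_cmma is a userspace opt-in. In this series it is
 * typically set through the KVM_S390_VM_MEM_CTRL attribute group on the
 * VM fd (an assumption about the companion interface, which is not shown
 * in this section):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
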
1547 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1548 {
1549         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1550 }
1551
1552 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1553 {
1554 retry:
1555         s390_vcpu_unblock(vcpu);
1556         /*
1557          * We use MMU_RELOAD just to re-arm the ipte notifier for the
1558          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1559          * This ensures that the ipte instruction for this request has
1560          * already finished. We might race against a second unmapper that

1561          * wants to set the blocking bit. Let's just retry the request loop.
1562          */
1563         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1564                 int rc;
1565                 rc = gmap_ipte_notify(vcpu->arch.gmap,
1566                                       kvm_s390_get_prefix(vcpu),
1567                                       PAGE_SIZE * 2);
1568                 if (rc)
1569                         return rc;
1570                 goto retry;
1571         }
1572
1573         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1574                 vcpu->arch.sie_block->ihcpu = 0xffff;
1575                 goto retry;
1576         }
1577
1578         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1579                 if (!ibs_enabled(vcpu)) {
1580                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1581                         atomic_set_mask(CPUSTAT_IBS,
1582                                         &vcpu->arch.sie_block->cpuflags);
1583                 }
1584                 goto retry;
1585         }
1586
1587         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1588                 if (ibs_enabled(vcpu)) {
1589                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1590                         atomic_clear_mask(CPUSTAT_IBS,
1591                                           &vcpu->arch.sie_block->cpuflags);
1592                 }
1593                 goto retry;
1594         }
1595
1596         /* nothing to do, just clear the request */
1597         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1598
1599         return 0;
1600 }
1601
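/*
 * For reference (a sketch of the request pattern consumed above; the
 * raising side is hypothetical here): a request is set on another CPU and
 * the SIE loop is kicked so that kvm_s390_handle_requests() runs before
 * the next guest entry:
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	exit_sie(vcpu);
 */
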
1602 /**
1603  * kvm_arch_fault_in_page - fault-in guest page if necessary
1604  * @vcpu: The corresponding virtual cpu
1605  * @gpa: Guest physical address
1606  * @writable: Whether the page should be writable or not
1607  *
1608  * Make sure that a guest page has been faulted-in on the host.
1609  *
1610  * Return: Zero on success, negative error code otherwise.
1611  */
1612 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1613 {
1614         return gmap_fault(vcpu->arch.gmap, gpa,
1615                           writable ? FAULT_FLAG_WRITE : 0);
1616 }
1617
1618 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1619                                       unsigned long token)
1620 {
1621         struct kvm_s390_interrupt inti;
1622         struct kvm_s390_irq irq;
1623
1624         if (start_token) {
1625                 irq.u.ext.ext_params2 = token;
1626                 irq.type = KVM_S390_INT_PFAULT_INIT;
1627                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
1628         } else {
1629                 inti.type = KVM_S390_INT_PFAULT_DONE;
1630                 inti.parm64 = token;
1631                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1632         }
1633 }
1634
1635 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1636                                      struct kvm_async_pf *work)
1637 {
1638         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1639         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1640 }
1641
1642 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1643                                  struct kvm_async_pf *work)
1644 {
1645         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1646         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1647 }
1648
1649 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1650                                struct kvm_async_pf *work)
1651 {
1652         /* s390 will always inject the page directly */
1653 }
1654
1655 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1656 {
1657         /*
1658          * s390 will always inject the page directly,
1659          * but we still want check_async_completion to clean up
1660          */
1661         return true;
1662 }
1663
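/*
 * Try to queue an asynchronous pfault ("pfault init" handshake) for the
 * current host fault instead of resolving it synchronously. This is only
 * done if the guest registered a valid pfault token, the PSW mask matches
 * the registered select/compare values, external interrupts and the
 * pfault subclass (CR0 bit 0x200) are enabled, no interrupt is pending,
 * and pfault is enabled for this gmap. A nonzero return means an async
 * pfault was queued; 0 tells the caller to fall back to a synchronous
 * fault-in.
 */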
1664 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1665 {
1666         hva_t hva;
1667         struct kvm_arch_async_pf arch;
1668         int rc;
1669
1670         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1671                 return 0;
1672         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1673             vcpu->arch.pfault_compare)
1674                 return 0;
1675         if (psw_extint_disabled(vcpu))
1676                 return 0;
1677         if (kvm_s390_vcpu_has_irq(vcpu, 0))
1678                 return 0;
1679         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1680                 return 0;
1681         if (!vcpu->arch.gmap->pfault_enabled)
1682                 return 0;
1683
1684         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1685         hva += current->thread.gmap_addr & ~PAGE_MASK;
1686         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
1687                 return 0;
1688
1689         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1690         return rc;
1691 }
1692
1693 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1694 {
1695         int rc, cpuflags;
1696
1697         /*
1698          * On s390, notifications for arriving pages are delivered directly
1699          * to the guest, but the housekeeping for completed pfaults is
1700          * handled outside the worker.
1701          */
1702         kvm_check_async_pf_completion(vcpu);
1703
1704         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1705
1706         if (need_resched())
1707                 schedule();
1708
1709         if (test_cpu_flag(CIF_MCCK_PENDING))
1710                 s390_handle_mcck();
1711
1712         if (!kvm_is_ucontrol(vcpu->kvm)) {
1713                 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1714                 if (rc)
1715                         return rc;
1716         }
1717
1718         rc = kvm_s390_handle_requests(vcpu);
1719         if (rc)
1720                 return rc;
1721
1722         if (guestdbg_enabled(vcpu)) {
1723                 kvm_s390_backup_guest_per_regs(vcpu);
1724                 kvm_s390_patch_guest_per_regs(vcpu);
1725         }
1726
1727         vcpu->arch.sie_block->icptcode = 0;
1728         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1729         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1730         trace_kvm_s390_sie_enter(vcpu, cpuflags);
1731
1732         return 0;
1733 }
1734
1735 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1736 {
1737         int rc = -1;
1738
1739         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1740                    vcpu->arch.sie_block->icptcode);
1741         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1742
1743         if (guestdbg_enabled(vcpu))
1744                 kvm_s390_restore_guest_per_regs(vcpu);
1745
1746         if (exit_reason >= 0) {
1747                 rc = 0;
1748         } else if (kvm_is_ucontrol(vcpu->kvm)) {
1749                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1750                 vcpu->run->s390_ucontrol.trans_exc_code =
1751                                                 current->thread.gmap_addr;
1752                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1753                 rc = -EREMOTE;
1754
1755         } else if (current->thread.gmap_pfault) {
1756                 trace_kvm_s390_major_guest_pfault(vcpu);
1757                 current->thread.gmap_pfault = 0;
1758                 if (kvm_arch_setup_async_pf(vcpu)) {
1759                         rc = 0;
1760                 } else {
1761                         gpa_t gpa = current->thread.gmap_addr;
1762                         rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1763                 }
1764         }
1765
1766         if (rc == -1) {
1767                 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1768                 trace_kvm_s390_sie_fault(vcpu);
1769                 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1770         }
1771
1772         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1773
1774         if (rc == 0) {
1775                 if (kvm_is_ucontrol(vcpu->kvm))
1776                         /* Don't exit for host interrupts. */
1777                         rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1778                 else
1779                         rc = kvm_handle_sie_intercept(vcpu);
1780         }
1781
1782         return rc;
1783 }
1784
1785 static int __vcpu_run(struct kvm_vcpu *vcpu)
1786 {
1787         int rc, exit_reason;
1788
1789         /*
1790          * We try to hold kvm->srcu during most of vcpu_run (except when
1791          * running the guest), so that memslots (and other stuff) are protected.
1792          */
1793         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1794
1795         do {
1796                 rc = vcpu_pre_run(vcpu);
1797                 if (rc)
1798                         break;
1799
1800                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1801                 /*
1802                  * As PF_VCPU will be used in the fault handler, there must
1803                  * be no uaccess between guest_enter and guest_exit.
1804                  */
1805                 preempt_disable();
1806                 kvm_guest_enter();
1807                 preempt_enable();
1808                 exit_reason = sie64a(vcpu->arch.sie_block,
1809                                      vcpu->run->s.regs.gprs);
1810                 kvm_guest_exit();
1811                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1812
1813                 rc = vcpu_post_run(vcpu, exit_reason);
1814         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
1815
1816         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1817         return rc;
1818 }
1819
1820 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1821 {
1822         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1823         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1824         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1825                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1826         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1827                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1828                 /* some control register changes require a tlb flush */
1829                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1830         }
1831         if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1832                 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1833                 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1834                 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1835                 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1836                 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1837         }
1838         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1839                 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1840                 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1841                 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
1842                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1843                         kvm_clear_async_pf_completion_queue(vcpu);
1844         }
1845         kvm_run->kvm_dirty_regs = 0;
1846 }
1847
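/*
 * Userspace view (illustrative sketch, not part of this file): the fields
 * consumed by sync_regs() are handed in through the shared kvm_run area,
 * guarded by kvm_dirty_regs bits, e.g. to install a new prefix before the
 * next KVM_RUN ("new_prefix" is a hypothetical value):
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */
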
1848 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1849 {
1850         kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1851         kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1852         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1853         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1854         kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1855         kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1856         kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1857         kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1858         kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1859         kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1860         kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1861         kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1862 }
1863
1864 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1865 {
1866         int rc;
1867         sigset_t sigsaved;
1868
1869         if (guestdbg_exit_pending(vcpu)) {
1870                 kvm_s390_prepare_debug_exit(vcpu);
1871                 return 0;
1872         }
1873
1874         if (vcpu->sigset_active)
1875                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1876
1877         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1878                 kvm_s390_vcpu_start(vcpu);
1879         } else if (is_vcpu_stopped(vcpu)) {
1880                 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1881                                    vcpu->vcpu_id);
1882                 return -EINVAL;
1883         }
1884
1885         sync_regs(vcpu, kvm_run);
1886
1887         might_fault();
1888         rc = __vcpu_run(vcpu);
1889
1890         if (signal_pending(current) && !rc) {
1891                 kvm_run->exit_reason = KVM_EXIT_INTR;
1892                 rc = -EINTR;
1893         }
1894
1895         if (guestdbg_exit_pending(vcpu) && !rc)  {
1896                 kvm_s390_prepare_debug_exit(vcpu);
1897                 rc = 0;
1898         }
1899
1900         if (rc == -EOPNOTSUPP) {
1901                 /* intercept cannot be handled in-kernel, prepare kvm-run */
1902                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
1903                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
1904                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
1905                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
1906                 rc = 0;
1907         }
1908
1909         if (rc == -EREMOTE) {
1910                 /* intercept was handled, but userspace support is needed;
1911                  * kvm_run has been prepared by the handler */
1912                 rc = 0;
1913         }
1914
1915         store_regs(vcpu, kvm_run);
1916
1917         if (vcpu->sigset_active)
1918                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1919
1920         vcpu->stat.exit_userspace++;
1921         return rc;
1922 }
1923
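/*
 * Userspace view (illustrative sketch, not part of this file): a minimal
 * run loop around the ioctl served by kvm_arch_vcpu_ioctl_run(), assuming
 * "run" points to the mmap()ed kvm_run area of "vcpu_fd" and <errno.h>
 * is included; handle_sieic() is a hypothetical helper:
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */
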
1924 /*
1925  * store status at address
1926  * we have two special cases:
1927  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1928  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1929  */
1930 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1931 {
1932         unsigned char archmode = 1;
1933         unsigned int px;
1934         u64 clkcomp;
1935         int rc;
1936
1937         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1938                 if (write_guest_abs(vcpu, 163, &archmode, 1))
1939                         return -EFAULT;
1940                 gpa = SAVE_AREA_BASE;
1941         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1942                 if (write_guest_real(vcpu, 163, &archmode, 1))
1943                         return -EFAULT;
1944                 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1945         }
1946         rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1947                              vcpu->arch.guest_fpregs.fprs, 128);
1948         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1949                               vcpu->run->s.regs.gprs, 128);
1950         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1951                               &vcpu->arch.sie_block->gpsw, 16);
1952         px = kvm_s390_get_prefix(vcpu);
1953         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1954                               &px, 4);
1955         rc |= write_guest_abs(vcpu,
1956                               gpa + offsetof(struct save_area, fp_ctrl_reg),
1957                               &vcpu->arch.guest_fpregs.fpc, 4);
1958         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1959                               &vcpu->arch.sie_block->todpr, 4);
1960         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1961                               &vcpu->arch.sie_block->cputm, 8);
1962         clkcomp = vcpu->arch.sie_block->ckc >> 8;
1963         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1964                               &clkcomp, 8);
1965         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1966                               &vcpu->run->s.regs.acrs, 64);
1967         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1968                               &vcpu->arch.sie_block->gcr, 128);
1969         return rc ? -EFAULT : 0;
1970 }
1971
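/*
 * Userspace view (illustrative sketch, not part of this file): for
 * KVM_S390_STORE_STATUS the ioctl argument is the guest absolute address
 * itself (or one of the two magic values handled above), not a pointer:
 *
 *	if (ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
 *		  KVM_S390_STORE_STATUS_NOADDR) < 0)
 *		perror("KVM_S390_STORE_STATUS");
 */
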
1972 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1973 {
1974         /*
1975          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1976          * copying in vcpu load/put. Let's update our copies before we save
1977          * them into the save area.
1978          */
1979         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1980         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1981         save_access_regs(vcpu->run->s.regs.acrs);
1982
1983         return kvm_s390_store_status_unloaded(vcpu, addr);
1984 }
1985
1986 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1987 {
1988         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1989         kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1990         exit_sie_sync(vcpu);
1991 }
1992
1993 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1994 {
1995         unsigned int i;
1996         struct kvm_vcpu *vcpu;
1997
1998         kvm_for_each_vcpu(i, vcpu, kvm) {
1999                 __disable_ibs_on_vcpu(vcpu);
2000         }
2001 }
2002
2003 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2004 {
2005         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2006         kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2007         exit_sie_sync(vcpu);
2008 }
2009
2010 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2011 {
2012         int i, online_vcpus, started_vcpus = 0;
2013
2014         if (!is_vcpu_stopped(vcpu))
2015                 return;
2016
2017         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2018         /* Only one cpu at a time may enter/leave the STOPPED state. */
2019         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2020         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2021
2022         for (i = 0; i < online_vcpus; i++) {
2023                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2024                         started_vcpus++;
2025         }
2026
2027         if (started_vcpus == 0) {
2028                 /* we're the only active VCPU -> speed it up */
2029                 __enable_ibs_on_vcpu(vcpu);
2030         } else if (started_vcpus == 1) {
2031                 /*
2032                  * As we are starting a second VCPU, we have to disable
2033                  * the IBS facility on all VCPUs to remove potentially
2034                  * outstanding ENABLE requests.
2035                  */
2036                 __disable_ibs_on_all_vcpus(vcpu->kvm);
2037         }
2038
2039         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2040         /*
2041          * Another VCPU might have used IBS while we were offline.
2042          * Let's play safe and flush the VCPU at startup.
2043          */
2044         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2045         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2046         return;
2047 }
2048
2049 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2050 {
2051         int i, online_vcpus, started_vcpus = 0;
2052         struct kvm_vcpu *started_vcpu = NULL;
2053
2054         if (is_vcpu_stopped(vcpu))
2055                 return;
2056
2057         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2058         /* Only one cpu at a time may enter/leave the STOPPED state. */
2059         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2060         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2061
2062         /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2063         kvm_s390_clear_stop_irq(vcpu);
2064
2065         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2066         __disable_ibs_on_vcpu(vcpu);
2067
2068         for (i = 0; i < online_vcpus; i++) {
2069                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2070                         started_vcpus++;
2071                         started_vcpu = vcpu->kvm->vcpus[i];
2072                 }
2073         }
2074
2075         if (started_vcpus == 1) {
2076                 /*
2077                  * As we only have one VCPU left, we want to enable the
2078                  * IBS facility for that VCPU to speed it up.
2079                  */
2080                 __enable_ibs_on_vcpu(started_vcpu);
2081         }
2082
2083         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2084         return;
2085 }
2086
2087 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2088                                      struct kvm_enable_cap *cap)
2089 {
2090         int r;
2091
2092         if (cap->flags)
2093                 return -EINVAL;
2094
2095         switch (cap->cap) {
2096         case KVM_CAP_S390_CSS_SUPPORT:
2097                 if (!vcpu->kvm->arch.css_support) {
2098                         vcpu->kvm->arch.css_support = 1;
2099                         trace_kvm_s390_enable_css(vcpu->kvm);
2100                 }
2101                 r = 0;
2102                 break;
2103         default:
2104                 r = -EINVAL;
2105                 break;
2106         }
2107         return r;
2108 }
2109
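/*
 * Userspace view (illustrative sketch, not part of this file): enabling
 * the CSS support capability handled above on a vcpu:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */
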
2110 long kvm_arch_vcpu_ioctl(struct file *filp,
2111                          unsigned int ioctl, unsigned long arg)
2112 {
2113         struct kvm_vcpu *vcpu = filp->private_data;
2114         void __user *argp = (void __user *)arg;
2115         int idx;
2116         long r;
2117
2118         switch (ioctl) {
2119         case KVM_S390_INTERRUPT: {
2120                 struct kvm_s390_interrupt s390int;
2121                 struct kvm_s390_irq s390irq;
2122
2123                 r = -EFAULT;
2124                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2125                         break;
2126                 if (s390int_to_s390irq(&s390int, &s390irq))
2127                         return -EINVAL;
2128                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2129                 break;
2130         }
2131         case KVM_S390_STORE_STATUS:
2132                 idx = srcu_read_lock(&vcpu->kvm->srcu);
2133                 r = kvm_s390_vcpu_store_status(vcpu, arg);
2134                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2135                 break;
2136         case KVM_S390_SET_INITIAL_PSW: {
2137                 psw_t psw;
2138
2139                 r = -EFAULT;
2140                 if (copy_from_user(&psw, argp, sizeof(psw)))
2141                         break;
2142                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2143                 break;
2144         }
2145         case KVM_S390_INITIAL_RESET:
2146                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2147                 break;
2148         case KVM_SET_ONE_REG:
2149         case KVM_GET_ONE_REG: {
2150                 struct kvm_one_reg reg;
2151                 r = -EFAULT;
2152                 if (copy_from_user(&reg, argp, sizeof(reg)))
2153                         break;
2154                 if (ioctl == KVM_SET_ONE_REG)
2155                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2156                 else
2157                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2158                 break;
2159         }
2160 #ifdef CONFIG_KVM_S390_UCONTROL
2161         case KVM_S390_UCAS_MAP: {
2162                 struct kvm_s390_ucas_mapping ucasmap;
2163
2164                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2165                         r = -EFAULT;
2166                         break;
2167                 }
2168
2169                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2170                         r = -EINVAL;
2171                         break;
2172                 }
2173
2174                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2175                                      ucasmap.vcpu_addr, ucasmap.length);
2176                 break;
2177         }
2178         case KVM_S390_UCAS_UNMAP: {
2179                 struct kvm_s390_ucas_mapping ucasmap;
2180
2181                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2182                         r = -EFAULT;
2183                         break;
2184                 }
2185
2186                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2187                         r = -EINVAL;
2188                         break;
2189                 }
2190
2191                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2192                         ucasmap.length);
2193                 break;
2194         }
2195 #endif
2196         case KVM_S390_VCPU_FAULT: {
2197                 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2198                 break;
2199         }
2200         case KVM_ENABLE_CAP:
2201         {
2202                 struct kvm_enable_cap cap;
2203                 r = -EFAULT;
2204                 if (copy_from_user(&cap, argp, sizeof(cap)))
2205                         break;
2206                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2207                 break;
2208         }
2209         default:
2210                 r = -ENOTTY;
2211         }
2212         return r;
2213 }
2214
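/*
 * Userspace view (illustrative sketch, not part of this file): for
 * user-controlled VMs, guest segments are mapped explicitly through the
 * KVM_S390_UCAS_MAP case above. Addresses and length are placeholders;
 * gmap_map_segment() expects them to be segment (1MB) aligned:
 *
 *	struct kvm_s390_ucas_mapping map = {
 *		.user_addr = (__u64)backing,	// hypothetical buffer
 *		.vcpu_addr = 0,
 *		.length    = 1UL << 20,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
 */
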
2215 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2216 {
2217 #ifdef CONFIG_KVM_S390_UCONTROL
2218         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2219                  && (kvm_is_ucontrol(vcpu->kvm))) {
2220                 vmf->page = virt_to_page(vcpu->arch.sie_block);
2221                 get_page(vmf->page);
2222                 return 0;
2223         }
2224 #endif
2225         return VM_FAULT_SIGBUS;
2226 }
2227
2228 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2229                             unsigned long npages)
2230 {
2231         return 0;
2232 }
2233
2234 /* Section: memory related */
2235 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2236                                    struct kvm_memory_slot *memslot,
2237                                    struct kvm_userspace_memory_region *mem,
2238                                    enum kvm_mr_change change)
2239 {
2240         /* A few sanity checks. Memory slots must start and end at a
2241            segment boundary (1MB). The memory in userland may be fragmented
2242            into various different vmas. It is okay to mmap() and munmap()
2243            stuff in this slot after doing this call at any time */
2244
2245         if (mem->userspace_addr & 0xffffful)
2246                 return -EINVAL;
2247
2248         if (mem->memory_size & 0xffffful)
2249                 return -EINVAL;
2250
2251         return 0;
2252 }
2253
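/*
 * Userspace view (illustrative sketch, not part of this file): a memslot
 * that satisfies the 1MB alignment rules checked above; "backing" is a
 * hypothetical, suitably aligned allocation:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256UL << 20,	// 256MB, segment aligned
 *		.userspace_addr  = (__u64)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
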
2254 void kvm_arch_commit_memory_region(struct kvm *kvm,
2255                                 struct kvm_userspace_memory_region *mem,
2256                                 const struct kvm_memory_slot *old,
2257                                 enum kvm_mr_change change)
2258 {
2259         int rc;
2260
2261         /* If the basics of the memslot do not change, we do not want
2262          * to update the gmap. Every update causes several unnecessary
2263          * segment translation exceptions. This is usually handled just
2264          * fine by the normal fault handler + gmap, but it will also
2265          * cause faults on the prefix page of running guest CPUs.
2266          */
2267         if (old->userspace_addr == mem->userspace_addr &&
2268             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2269             old->npages * PAGE_SIZE == mem->memory_size)
2270                 return;
2271
2272         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2273                 mem->guest_phys_addr, mem->memory_size);
2274         if (rc)
2275                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2276         return;
2277 }
2278
2279 static int __init kvm_s390_init(void)
2280 {
2281         return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2282 }
2283
2284 static void __exit kvm_s390_exit(void)
2285 {
2286         kvm_exit();
2287 }
2288
2289 module_init(kvm_s390_init);
2290 module_exit(kvm_s390_exit);
2291
2292 /*
2293  * Enable autoloading of the kvm module.
2294  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2295  * since x86 takes a different approach.
2296  */
2297 #include <linux/miscdevice.h>
2298 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2299 MODULE_ALIAS("devname:kvm");