]> git.karo-electronics.de Git - karo-tx-linux.git/blob - arch/s390/kvm/kvm-s390.c
KVM: add kvm_arch_sched_in
[karo-tx-linux.git] / arch / s390 / kvm / kvm-s390.c
1 /*
2  * hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  *               Jason J. Herne <jjherne@us.ibm.com>
15  */
16
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/lowcore.h>
29 #include <asm/pgtable.h>
30 #include <asm/nmi.h>
31 #include <asm/switch_to.h>
32 #include <asm/facility.h>
33 #include <asm/sclp.h>
34 #include "kvm-s390.h"
35 #include "gaccess.h"
36
37 #define CREATE_TRACE_POINTS
38 #include "trace.h"
39 #include "trace-s390.h"
40
41 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43 struct kvm_stats_debugfs_item debugfs_entries[] = {
44         { "userspace_handled", VCPU_STAT(exit_userspace) },
45         { "exit_null", VCPU_STAT(exit_null) },
46         { "exit_validity", VCPU_STAT(exit_validity) },
47         { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48         { "exit_external_request", VCPU_STAT(exit_external_request) },
49         { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50         { "exit_instruction", VCPU_STAT(exit_instruction) },
51         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
55         { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56         { "instruction_stctg", VCPU_STAT(instruction_stctg) },
57         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
58         { "deliver_external_call", VCPU_STAT(deliver_external_call) },
59         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60         { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61         { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62         { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63         { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64         { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65         { "exit_wait_state", VCPU_STAT(exit_wait_state) },
66         { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
67         { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68         { "instruction_spx", VCPU_STAT(instruction_spx) },
69         { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70         { "instruction_stap", VCPU_STAT(instruction_stap) },
71         { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
72         { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
73         { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74         { "instruction_chsc", VCPU_STAT(instruction_chsc) },
75         { "instruction_essa", VCPU_STAT(instruction_essa) },
76         { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77         { "instruction_stfl", VCPU_STAT(instruction_stfl) },
78         { "instruction_tprot", VCPU_STAT(instruction_tprot) },
79         { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
80         { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
81         { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
82         { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83         { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84         { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85         { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86         { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
87         { "diagnose_10", VCPU_STAT(diagnose_10) },
88         { "diagnose_44", VCPU_STAT(diagnose_44) },
89         { "diagnose_9c", VCPU_STAT(diagnose_9c) },
90         { NULL }
91 };
92
93 unsigned long *vfacilities;
94 static struct gmap_notifier gmap_notifier;
95
96 /* test availability of vfacility */
97 int test_vfacility(unsigned long nr)
98 {
99         return __test_facility(nr, (void *) vfacilities);
100 }
101
102 /* Section: not file related */
103 int kvm_arch_hardware_enable(void *garbage)
104 {
105         /* every s390 is virtualization enabled ;-) */
106         return 0;
107 }
108
109 void kvm_arch_hardware_disable(void *garbage)
110 {
111 }
112
113 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
114
115 int kvm_arch_hardware_setup(void)
116 {
117         gmap_notifier.notifier_call = kvm_gmap_notifier;
118         gmap_register_ipte_notifier(&gmap_notifier);
119         return 0;
120 }
121
122 void kvm_arch_hardware_unsetup(void)
123 {
124         gmap_unregister_ipte_notifier(&gmap_notifier);
125 }
126
127 void kvm_arch_check_processor_compat(void *rtn)
128 {
129 }
130
131 int kvm_arch_init(void *opaque)
132 {
133         return 0;
134 }
135
136 void kvm_arch_exit(void)
137 {
138 }
139
140 /* Section: device related */
141 long kvm_arch_dev_ioctl(struct file *filp,
142                         unsigned int ioctl, unsigned long arg)
143 {
144         if (ioctl == KVM_S390_ENABLE_SIE)
145                 return s390_enable_sie();
146         return -EINVAL;
147 }
148
149 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
150 {
151         int r;
152
153         switch (ext) {
154         case KVM_CAP_S390_PSW:
155         case KVM_CAP_S390_GMAP:
156         case KVM_CAP_SYNC_MMU:
157 #ifdef CONFIG_KVM_S390_UCONTROL
158         case KVM_CAP_S390_UCONTROL:
159 #endif
160         case KVM_CAP_ASYNC_PF:
161         case KVM_CAP_SYNC_REGS:
162         case KVM_CAP_ONE_REG:
163         case KVM_CAP_ENABLE_CAP:
164         case KVM_CAP_S390_CSS_SUPPORT:
165         case KVM_CAP_IRQFD:
166         case KVM_CAP_IOEVENTFD:
167         case KVM_CAP_DEVICE_CTRL:
168         case KVM_CAP_ENABLE_CAP_VM:
169         case KVM_CAP_S390_IRQCHIP:
170         case KVM_CAP_VM_ATTRIBUTES:
171         case KVM_CAP_MP_STATE:
172                 r = 1;
173                 break;
174         case KVM_CAP_NR_VCPUS:
175         case KVM_CAP_MAX_VCPUS:
176                 r = KVM_MAX_VCPUS;
177                 break;
178         case KVM_CAP_NR_MEMSLOTS:
179                 r = KVM_USER_MEM_SLOTS;
180                 break;
181         case KVM_CAP_S390_COW:
182                 r = MACHINE_HAS_ESOP;
183                 break;
184         default:
185                 r = 0;
186         }
187         return r;
188 }
189
190 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
191                                         struct kvm_memory_slot *memslot)
192 {
193         gfn_t cur_gfn, last_gfn;
194         unsigned long address;
195         struct gmap *gmap = kvm->arch.gmap;
196
197         down_read(&gmap->mm->mmap_sem);
198         /* Loop over all guest pages */
199         last_gfn = memslot->base_gfn + memslot->npages;
200         for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
201                 address = gfn_to_hva_memslot(memslot, cur_gfn);
202
203                 if (gmap_test_and_clear_dirty(address, gmap))
204                         mark_page_dirty(kvm, cur_gfn);
205         }
206         up_read(&gmap->mm->mmap_sem);
207 }
208
209 /* Section: vm related */
210 /*
211  * Get (and clear) the dirty memory log for a memory slot.
212  */
213 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
214                                struct kvm_dirty_log *log)
215 {
216         int r;
217         unsigned long n;
218         struct kvm_memory_slot *memslot;
219         int is_dirty = 0;
220
221         mutex_lock(&kvm->slots_lock);
222
223         r = -EINVAL;
224         if (log->slot >= KVM_USER_MEM_SLOTS)
225                 goto out;
226
227         memslot = id_to_memslot(kvm->memslots, log->slot);
228         r = -ENOENT;
229         if (!memslot->dirty_bitmap)
230                 goto out;
231
232         kvm_s390_sync_dirty_log(kvm, memslot);
233         r = kvm_get_dirty_log(kvm, log, &is_dirty);
234         if (r)
235                 goto out;
236
237         /* Clear the dirty log */
238         if (is_dirty) {
239                 n = kvm_dirty_bitmap_bytes(memslot);
240                 memset(memslot->dirty_bitmap, 0, n);
241         }
242         r = 0;
243 out:
244         mutex_unlock(&kvm->slots_lock);
245         return r;
246 }
247
248 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249 {
250         int r;
251
252         if (cap->flags)
253                 return -EINVAL;
254
255         switch (cap->cap) {
256         case KVM_CAP_S390_IRQCHIP:
257                 kvm->arch.use_irqchip = 1;
258                 r = 0;
259                 break;
260         default:
261                 r = -EINVAL;
262                 break;
263         }
264         return r;
265 }
266
267 static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
268 {
269         int ret;
270         unsigned int idx;
271         switch (attr->attr) {
272         case KVM_S390_VM_MEM_ENABLE_CMMA:
273                 ret = -EBUSY;
274                 mutex_lock(&kvm->lock);
275                 if (atomic_read(&kvm->online_vcpus) == 0) {
276                         kvm->arch.use_cmma = 1;
277                         ret = 0;
278                 }
279                 mutex_unlock(&kvm->lock);
280                 break;
281         case KVM_S390_VM_MEM_CLR_CMMA:
282                 mutex_lock(&kvm->lock);
283                 idx = srcu_read_lock(&kvm->srcu);
284                 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
285                 srcu_read_unlock(&kvm->srcu, idx);
286                 mutex_unlock(&kvm->lock);
287                 ret = 0;
288                 break;
289         default:
290                 ret = -ENXIO;
291                 break;
292         }
293         return ret;
294 }
295
296 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
297 {
298         int ret;
299
300         switch (attr->group) {
301         case KVM_S390_VM_MEM_CTRL:
302                 ret = kvm_s390_mem_control(kvm, attr);
303                 break;
304         default:
305                 ret = -ENXIO;
306                 break;
307         }
308
309         return ret;
310 }
311
312 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
313 {
314         return -ENXIO;
315 }
316
317 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
318 {
319         int ret;
320
321         switch (attr->group) {
322         case KVM_S390_VM_MEM_CTRL:
323                 switch (attr->attr) {
324                 case KVM_S390_VM_MEM_ENABLE_CMMA:
325                 case KVM_S390_VM_MEM_CLR_CMMA:
326                         ret = 0;
327                         break;
328                 default:
329                         ret = -ENXIO;
330                         break;
331                 }
332                 break;
333         default:
334                 ret = -ENXIO;
335                 break;
336         }
337
338         return ret;
339 }
340
341 long kvm_arch_vm_ioctl(struct file *filp,
342                        unsigned int ioctl, unsigned long arg)
343 {
344         struct kvm *kvm = filp->private_data;
345         void __user *argp = (void __user *)arg;
346         struct kvm_device_attr attr;
347         int r;
348
349         switch (ioctl) {
350         case KVM_S390_INTERRUPT: {
351                 struct kvm_s390_interrupt s390int;
352
353                 r = -EFAULT;
354                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
355                         break;
356                 r = kvm_s390_inject_vm(kvm, &s390int);
357                 break;
358         }
359         case KVM_ENABLE_CAP: {
360                 struct kvm_enable_cap cap;
361                 r = -EFAULT;
362                 if (copy_from_user(&cap, argp, sizeof(cap)))
363                         break;
364                 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
365                 break;
366         }
367         case KVM_CREATE_IRQCHIP: {
368                 struct kvm_irq_routing_entry routing;
369
370                 r = -EINVAL;
371                 if (kvm->arch.use_irqchip) {
372                         /* Set up dummy routing. */
373                         memset(&routing, 0, sizeof(routing));
374                         kvm_set_irq_routing(kvm, &routing, 0, 0);
375                         r = 0;
376                 }
377                 break;
378         }
379         case KVM_SET_DEVICE_ATTR: {
380                 r = -EFAULT;
381                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
382                         break;
383                 r = kvm_s390_vm_set_attr(kvm, &attr);
384                 break;
385         }
386         case KVM_GET_DEVICE_ATTR: {
387                 r = -EFAULT;
388                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
389                         break;
390                 r = kvm_s390_vm_get_attr(kvm, &attr);
391                 break;
392         }
393         case KVM_HAS_DEVICE_ATTR: {
394                 r = -EFAULT;
395                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
396                         break;
397                 r = kvm_s390_vm_has_attr(kvm, &attr);
398                 break;
399         }
400         default:
401                 r = -ENOTTY;
402         }
403
404         return r;
405 }
406
407 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
408 {
409         int rc;
410         char debug_name[16];
411         static unsigned long sca_offset;
412
413         rc = -EINVAL;
414 #ifdef CONFIG_KVM_S390_UCONTROL
415         if (type & ~KVM_VM_S390_UCONTROL)
416                 goto out_err;
417         if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
418                 goto out_err;
419 #else
420         if (type)
421                 goto out_err;
422 #endif
423
424         rc = s390_enable_sie();
425         if (rc)
426                 goto out_err;
427
428         rc = -ENOMEM;
429
430         kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
431         if (!kvm->arch.sca)
432                 goto out_err;
433         spin_lock(&kvm_lock);
434         sca_offset = (sca_offset + 16) & 0x7f0;
435         kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
436         spin_unlock(&kvm_lock);
437
438         sprintf(debug_name, "kvm-%u", current->pid);
439
440         kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
441         if (!kvm->arch.dbf)
442                 goto out_nodbf;
443
444         spin_lock_init(&kvm->arch.float_int.lock);
445         INIT_LIST_HEAD(&kvm->arch.float_int.list);
446         init_waitqueue_head(&kvm->arch.ipte_wq);
447
448         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
449         VM_EVENT(kvm, 3, "%s", "vm created");
450
451         if (type & KVM_VM_S390_UCONTROL) {
452                 kvm->arch.gmap = NULL;
453         } else {
454                 kvm->arch.gmap = gmap_alloc(current->mm);
455                 if (!kvm->arch.gmap)
456                         goto out_nogmap;
457                 kvm->arch.gmap->private = kvm;
458                 kvm->arch.gmap->pfault_enabled = 0;
459         }
460
461         kvm->arch.css_support = 0;
462         kvm->arch.use_irqchip = 0;
463
464         spin_lock_init(&kvm->arch.start_stop_lock);
465
466         return 0;
467 out_nogmap:
468         debug_unregister(kvm->arch.dbf);
469 out_nodbf:
470         free_page((unsigned long)(kvm->arch.sca));
471 out_err:
472         return rc;
473 }
474
475 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
476 {
477         VCPU_EVENT(vcpu, 3, "%s", "free cpu");
478         trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
479         kvm_s390_clear_local_irqs(vcpu);
480         kvm_clear_async_pf_completion_queue(vcpu);
481         if (!kvm_is_ucontrol(vcpu->kvm)) {
482                 clear_bit(63 - vcpu->vcpu_id,
483                           (unsigned long *) &vcpu->kvm->arch.sca->mcn);
484                 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
485                     (__u64) vcpu->arch.sie_block)
486                         vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
487         }
488         smp_mb();
489
490         if (kvm_is_ucontrol(vcpu->kvm))
491                 gmap_free(vcpu->arch.gmap);
492
493         if (kvm_s390_cmma_enabled(vcpu->kvm))
494                 kvm_s390_vcpu_unsetup_cmma(vcpu);
495         free_page((unsigned long)(vcpu->arch.sie_block));
496
497         kvm_vcpu_uninit(vcpu);
498         kmem_cache_free(kvm_vcpu_cache, vcpu);
499 }
500
501 static void kvm_free_vcpus(struct kvm *kvm)
502 {
503         unsigned int i;
504         struct kvm_vcpu *vcpu;
505
506         kvm_for_each_vcpu(i, vcpu, kvm)
507                 kvm_arch_vcpu_destroy(vcpu);
508
509         mutex_lock(&kvm->lock);
510         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
511                 kvm->vcpus[i] = NULL;
512
513         atomic_set(&kvm->online_vcpus, 0);
514         mutex_unlock(&kvm->lock);
515 }
516
517 void kvm_arch_sync_events(struct kvm *kvm)
518 {
519 }
520
521 void kvm_arch_destroy_vm(struct kvm *kvm)
522 {
523         kvm_free_vcpus(kvm);
524         free_page((unsigned long)(kvm->arch.sca));
525         debug_unregister(kvm->arch.dbf);
526         if (!kvm_is_ucontrol(kvm))
527                 gmap_free(kvm->arch.gmap);
528         kvm_s390_destroy_adapters(kvm);
529         kvm_s390_clear_float_irqs(kvm);
530 }
531
532 /* Section: vcpu related */
533 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
534 {
535         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
536         kvm_clear_async_pf_completion_queue(vcpu);
537         if (kvm_is_ucontrol(vcpu->kvm)) {
538                 vcpu->arch.gmap = gmap_alloc(current->mm);
539                 if (!vcpu->arch.gmap)
540                         return -ENOMEM;
541                 vcpu->arch.gmap->private = vcpu->kvm;
542                 return 0;
543         }
544
545         vcpu->arch.gmap = vcpu->kvm->arch.gmap;
546         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
547                                     KVM_SYNC_GPRS |
548                                     KVM_SYNC_ACRS |
549                                     KVM_SYNC_CRS;
550         return 0;
551 }
552
553 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
554 {
555         /* Nothing todo */
556 }
557
558 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
559 {
560 }
561
562 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
563 {
564         save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
565         save_fp_regs(vcpu->arch.host_fpregs.fprs);
566         save_access_regs(vcpu->arch.host_acrs);
567         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
568         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
569         restore_access_regs(vcpu->run->s.regs.acrs);
570         gmap_enable(vcpu->arch.gmap);
571         atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
572 }
573
574 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
575 {
576         atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
577         gmap_disable(vcpu->arch.gmap);
578         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
579         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
580         save_access_regs(vcpu->run->s.regs.acrs);
581         restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
582         restore_fp_regs(vcpu->arch.host_fpregs.fprs);
583         restore_access_regs(vcpu->arch.host_acrs);
584 }
585
586 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
587 {
588         /* this equals initial cpu reset in pop, but we don't switch to ESA */
589         vcpu->arch.sie_block->gpsw.mask = 0UL;
590         vcpu->arch.sie_block->gpsw.addr = 0UL;
591         kvm_s390_set_prefix(vcpu, 0);
592         vcpu->arch.sie_block->cputm     = 0UL;
593         vcpu->arch.sie_block->ckc       = 0UL;
594         vcpu->arch.sie_block->todpr     = 0;
595         memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
596         vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
597         vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
598         vcpu->arch.guest_fpregs.fpc = 0;
599         asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
600         vcpu->arch.sie_block->gbea = 1;
601         vcpu->arch.sie_block->pp = 0;
602         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
603         kvm_clear_async_pf_completion_queue(vcpu);
604         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
605                 kvm_s390_vcpu_stop(vcpu);
606         kvm_s390_clear_local_irqs(vcpu);
607 }
608
609 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
610 {
611         return 0;
612 }
613
614 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
615 {
616         free_page(vcpu->arch.sie_block->cbrlo);
617         vcpu->arch.sie_block->cbrlo = 0;
618 }
619
620 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
621 {
622         vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
623         if (!vcpu->arch.sie_block->cbrlo)
624                 return -ENOMEM;
625
626         vcpu->arch.sie_block->ecb2 |= 0x80;
627         vcpu->arch.sie_block->ecb2 &= ~0x08;
628         return 0;
629 }
630
631 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
632 {
633         int rc = 0;
634
635         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
636                                                     CPUSTAT_SM |
637                                                     CPUSTAT_STOPPED |
638                                                     CPUSTAT_GED);
639         vcpu->arch.sie_block->ecb   = 6;
640         if (test_vfacility(50) && test_vfacility(73))
641                 vcpu->arch.sie_block->ecb |= 0x10;
642
643         vcpu->arch.sie_block->ecb2  = 8;
644         vcpu->arch.sie_block->eca   = 0xD1002000U;
645         if (sclp_has_siif())
646                 vcpu->arch.sie_block->eca |= 1;
647         vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
648         vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
649                                       ICTL_TPROT;
650
651         if (kvm_s390_cmma_enabled(vcpu->kvm)) {
652                 rc = kvm_s390_vcpu_setup_cmma(vcpu);
653                 if (rc)
654                         return rc;
655         }
656         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
657         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
658         get_cpu_id(&vcpu->arch.cpu_id);
659         vcpu->arch.cpu_id.version = 0xff;
660         return rc;
661 }
662
663 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
664                                       unsigned int id)
665 {
666         struct kvm_vcpu *vcpu;
667         struct sie_page *sie_page;
668         int rc = -EINVAL;
669
670         if (id >= KVM_MAX_VCPUS)
671                 goto out;
672
673         rc = -ENOMEM;
674
675         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
676         if (!vcpu)
677                 goto out;
678
679         sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
680         if (!sie_page)
681                 goto out_free_cpu;
682
683         vcpu->arch.sie_block = &sie_page->sie_block;
684         vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
685
686         vcpu->arch.sie_block->icpua = id;
687         if (!kvm_is_ucontrol(kvm)) {
688                 if (!kvm->arch.sca) {
689                         WARN_ON_ONCE(1);
690                         goto out_free_cpu;
691                 }
692                 if (!kvm->arch.sca->cpu[id].sda)
693                         kvm->arch.sca->cpu[id].sda =
694                                 (__u64) vcpu->arch.sie_block;
695                 vcpu->arch.sie_block->scaoh =
696                         (__u32)(((__u64)kvm->arch.sca) >> 32);
697                 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
698                 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
699         }
700
701         spin_lock_init(&vcpu->arch.local_int.lock);
702         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
703         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
704         vcpu->arch.local_int.wq = &vcpu->wq;
705         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
706
707         rc = kvm_vcpu_init(vcpu, kvm, id);
708         if (rc)
709                 goto out_free_sie_block;
710         VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
711                  vcpu->arch.sie_block);
712         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
713
714         return vcpu;
715 out_free_sie_block:
716         free_page((unsigned long)(vcpu->arch.sie_block));
717 out_free_cpu:
718         kmem_cache_free(kvm_vcpu_cache, vcpu);
719 out:
720         return ERR_PTR(rc);
721 }
722
723 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
724 {
725         return kvm_cpu_has_interrupt(vcpu);
726 }
727
728 void s390_vcpu_block(struct kvm_vcpu *vcpu)
729 {
730         atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
731 }
732
733 void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
734 {
735         atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
736 }
737
738 /*
739  * Kick a guest cpu out of SIE and wait until SIE is not running.
740  * If the CPU is not running (e.g. waiting as idle) the function will
741  * return immediately. */
742 void exit_sie(struct kvm_vcpu *vcpu)
743 {
744         atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
745         while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
746                 cpu_relax();
747 }
748
749 /* Kick a guest cpu out of SIE and prevent SIE-reentry */
750 void exit_sie_sync(struct kvm_vcpu *vcpu)
751 {
752         s390_vcpu_block(vcpu);
753         exit_sie(vcpu);
754 }
755
756 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
757 {
758         int i;
759         struct kvm *kvm = gmap->private;
760         struct kvm_vcpu *vcpu;
761
762         kvm_for_each_vcpu(i, vcpu, kvm) {
763                 /* match against both prefix pages */
764                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
765                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
766                         kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
767                         exit_sie_sync(vcpu);
768                 }
769         }
770 }
771
772 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
773 {
774         /* kvm common code refers to this, but never calls it */
775         BUG();
776         return 0;
777 }
778
779 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
780                                            struct kvm_one_reg *reg)
781 {
782         int r = -EINVAL;
783
784         switch (reg->id) {
785         case KVM_REG_S390_TODPR:
786                 r = put_user(vcpu->arch.sie_block->todpr,
787                              (u32 __user *)reg->addr);
788                 break;
789         case KVM_REG_S390_EPOCHDIFF:
790                 r = put_user(vcpu->arch.sie_block->epoch,
791                              (u64 __user *)reg->addr);
792                 break;
793         case KVM_REG_S390_CPU_TIMER:
794                 r = put_user(vcpu->arch.sie_block->cputm,
795                              (u64 __user *)reg->addr);
796                 break;
797         case KVM_REG_S390_CLOCK_COMP:
798                 r = put_user(vcpu->arch.sie_block->ckc,
799                              (u64 __user *)reg->addr);
800                 break;
801         case KVM_REG_S390_PFTOKEN:
802                 r = put_user(vcpu->arch.pfault_token,
803                              (u64 __user *)reg->addr);
804                 break;
805         case KVM_REG_S390_PFCOMPARE:
806                 r = put_user(vcpu->arch.pfault_compare,
807                              (u64 __user *)reg->addr);
808                 break;
809         case KVM_REG_S390_PFSELECT:
810                 r = put_user(vcpu->arch.pfault_select,
811                              (u64 __user *)reg->addr);
812                 break;
813         case KVM_REG_S390_PP:
814                 r = put_user(vcpu->arch.sie_block->pp,
815                              (u64 __user *)reg->addr);
816                 break;
817         case KVM_REG_S390_GBEA:
818                 r = put_user(vcpu->arch.sie_block->gbea,
819                              (u64 __user *)reg->addr);
820                 break;
821         default:
822                 break;
823         }
824
825         return r;
826 }
827
828 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
829                                            struct kvm_one_reg *reg)
830 {
831         int r = -EINVAL;
832
833         switch (reg->id) {
834         case KVM_REG_S390_TODPR:
835                 r = get_user(vcpu->arch.sie_block->todpr,
836                              (u32 __user *)reg->addr);
837                 break;
838         case KVM_REG_S390_EPOCHDIFF:
839                 r = get_user(vcpu->arch.sie_block->epoch,
840                              (u64 __user *)reg->addr);
841                 break;
842         case KVM_REG_S390_CPU_TIMER:
843                 r = get_user(vcpu->arch.sie_block->cputm,
844                              (u64 __user *)reg->addr);
845                 break;
846         case KVM_REG_S390_CLOCK_COMP:
847                 r = get_user(vcpu->arch.sie_block->ckc,
848                              (u64 __user *)reg->addr);
849                 break;
850         case KVM_REG_S390_PFTOKEN:
851                 r = get_user(vcpu->arch.pfault_token,
852                              (u64 __user *)reg->addr);
853                 break;
854         case KVM_REG_S390_PFCOMPARE:
855                 r = get_user(vcpu->arch.pfault_compare,
856                              (u64 __user *)reg->addr);
857                 break;
858         case KVM_REG_S390_PFSELECT:
859                 r = get_user(vcpu->arch.pfault_select,
860                              (u64 __user *)reg->addr);
861                 break;
862         case KVM_REG_S390_PP:
863                 r = get_user(vcpu->arch.sie_block->pp,
864                              (u64 __user *)reg->addr);
865                 break;
866         case KVM_REG_S390_GBEA:
867                 r = get_user(vcpu->arch.sie_block->gbea,
868                              (u64 __user *)reg->addr);
869                 break;
870         default:
871                 break;
872         }
873
874         return r;
875 }
876
877 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
878 {
879         kvm_s390_vcpu_initial_reset(vcpu);
880         return 0;
881 }
882
883 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
884 {
885         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
886         return 0;
887 }
888
889 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
890 {
891         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
892         return 0;
893 }
894
895 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
896                                   struct kvm_sregs *sregs)
897 {
898         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
899         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
900         restore_access_regs(vcpu->run->s.regs.acrs);
901         return 0;
902 }
903
904 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
905                                   struct kvm_sregs *sregs)
906 {
907         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
908         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
909         return 0;
910 }
911
912 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
913 {
914         if (test_fp_ctl(fpu->fpc))
915                 return -EINVAL;
916         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
917         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
918         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
919         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
920         return 0;
921 }
922
923 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
924 {
925         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
926         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
927         return 0;
928 }
929
930 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
931 {
932         int rc = 0;
933
934         if (!is_vcpu_stopped(vcpu))
935                 rc = -EBUSY;
936         else {
937                 vcpu->run->psw_mask = psw.mask;
938                 vcpu->run->psw_addr = psw.addr;
939         }
940         return rc;
941 }
942
943 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
944                                   struct kvm_translation *tr)
945 {
946         return -EINVAL; /* not implemented yet */
947 }
948
949 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
950                               KVM_GUESTDBG_USE_HW_BP | \
951                               KVM_GUESTDBG_ENABLE)
952
953 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
954                                         struct kvm_guest_debug *dbg)
955 {
956         int rc = 0;
957
958         vcpu->guest_debug = 0;
959         kvm_s390_clear_bp_data(vcpu);
960
961         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
962                 return -EINVAL;
963
964         if (dbg->control & KVM_GUESTDBG_ENABLE) {
965                 vcpu->guest_debug = dbg->control;
966                 /* enforce guest PER */
967                 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
968
969                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
970                         rc = kvm_s390_import_bp_data(vcpu, dbg);
971         } else {
972                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
973                 vcpu->arch.guestdbg.last_bp = 0;
974         }
975
976         if (rc) {
977                 vcpu->guest_debug = 0;
978                 kvm_s390_clear_bp_data(vcpu);
979                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
980         }
981
982         return rc;
983 }
984
985 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
986                                     struct kvm_mp_state *mp_state)
987 {
988         /* CHECK_STOP and LOAD are not supported yet */
989         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
990                                        KVM_MP_STATE_OPERATING;
991 }
992
993 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
994                                     struct kvm_mp_state *mp_state)
995 {
996         int rc = 0;
997
998         /* user space knows about this interface - let it control the state */
999         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1000
1001         switch (mp_state->mp_state) {
1002         case KVM_MP_STATE_STOPPED:
1003                 kvm_s390_vcpu_stop(vcpu);
1004                 break;
1005         case KVM_MP_STATE_OPERATING:
1006                 kvm_s390_vcpu_start(vcpu);
1007                 break;
1008         case KVM_MP_STATE_LOAD:
1009         case KVM_MP_STATE_CHECK_STOP:
1010                 /* fall through - CHECK_STOP and LOAD are not supported yet */
1011         default:
1012                 rc = -ENXIO;
1013         }
1014
1015         return rc;
1016 }
1017
1018 bool kvm_s390_cmma_enabled(struct kvm *kvm)
1019 {
1020         if (!MACHINE_IS_LPAR)
1021                 return false;
1022         /* only enable for z10 and later */
1023         if (!MACHINE_HAS_EDAT1)
1024                 return false;
1025         if (!kvm->arch.use_cmma)
1026                 return false;
1027         return true;
1028 }
1029
1030 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1031 {
1032         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1033 }
1034
1035 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1036 {
1037 retry:
1038         s390_vcpu_unblock(vcpu);
1039         /*
1040          * We use MMU_RELOAD just to re-arm the ipte notifier for the
1041          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1042          * This ensures that the ipte instruction for this request has
1043          * already finished. We might race against a second unmapper that
1044          * wants to set the blocking bit. Lets just retry the request loop.
1045          */
1046         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1047                 int rc;
1048                 rc = gmap_ipte_notify(vcpu->arch.gmap,
1049                                       kvm_s390_get_prefix(vcpu),
1050                                       PAGE_SIZE * 2);
1051                 if (rc)
1052                         return rc;
1053                 goto retry;
1054         }
1055
1056         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1057                 if (!ibs_enabled(vcpu)) {
1058                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1059                         atomic_set_mask(CPUSTAT_IBS,
1060                                         &vcpu->arch.sie_block->cpuflags);
1061                 }
1062                 goto retry;
1063         }
1064
1065         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1066                 if (ibs_enabled(vcpu)) {
1067                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1068                         atomic_clear_mask(CPUSTAT_IBS,
1069                                           &vcpu->arch.sie_block->cpuflags);
1070                 }
1071                 goto retry;
1072         }
1073
1074         /* nothing to do, just clear the request */
1075         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1076
1077         return 0;
1078 }
1079
1080 /**
1081  * kvm_arch_fault_in_page - fault-in guest page if necessary
1082  * @vcpu: The corresponding virtual cpu
1083  * @gpa: Guest physical address
1084  * @writable: Whether the page should be writable or not
1085  *
1086  * Make sure that a guest page has been faulted-in on the host.
1087  *
1088  * Return: Zero on success, negative error code otherwise.
1089  */
1090 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1091 {
1092         struct mm_struct *mm = current->mm;
1093         hva_t hva;
1094         long rc;
1095
1096         hva = gmap_fault(gpa, vcpu->arch.gmap);
1097         if (IS_ERR_VALUE(hva))
1098                 return (long)hva;
1099         down_read(&mm->mmap_sem);
1100         rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
1101         up_read(&mm->mmap_sem);
1102
1103         return rc < 0 ? rc : 0;
1104 }
1105
1106 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1107                                       unsigned long token)
1108 {
1109         struct kvm_s390_interrupt inti;
1110         inti.parm64 = token;
1111
1112         if (start_token) {
1113                 inti.type = KVM_S390_INT_PFAULT_INIT;
1114                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
1115         } else {
1116                 inti.type = KVM_S390_INT_PFAULT_DONE;
1117                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1118         }
1119 }
1120
1121 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1122                                      struct kvm_async_pf *work)
1123 {
1124         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1125         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1126 }
1127
1128 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1129                                  struct kvm_async_pf *work)
1130 {
1131         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1132         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1133 }
1134
1135 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1136                                struct kvm_async_pf *work)
1137 {
1138         /* s390 will always inject the page directly */
1139 }
1140
1141 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1142 {
1143         /*
1144          * s390 will always inject the page directly,
1145          * but we still want check_async_completion to cleanup
1146          */
1147         return true;
1148 }
1149
1150 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1151 {
1152         hva_t hva;
1153         struct kvm_arch_async_pf arch;
1154         int rc;
1155
1156         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1157                 return 0;
1158         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1159             vcpu->arch.pfault_compare)
1160                 return 0;
1161         if (psw_extint_disabled(vcpu))
1162                 return 0;
1163         if (kvm_cpu_has_interrupt(vcpu))
1164                 return 0;
1165         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1166                 return 0;
1167         if (!vcpu->arch.gmap->pfault_enabled)
1168                 return 0;
1169
1170         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1171         hva += current->thread.gmap_addr & ~PAGE_MASK;
1172         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
1173                 return 0;
1174
1175         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1176         return rc;
1177 }
1178
1179 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1180 {
1181         int rc, cpuflags;
1182
1183         /*
1184          * On s390 notifications for arriving pages will be delivered directly
1185          * to the guest but the house keeping for completed pfaults is
1186          * handled outside the worker.
1187          */
1188         kvm_check_async_pf_completion(vcpu);
1189
1190         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1191
1192         if (need_resched())
1193                 schedule();
1194
1195         if (test_cpu_flag(CIF_MCCK_PENDING))
1196                 s390_handle_mcck();
1197
1198         if (!kvm_is_ucontrol(vcpu->kvm))
1199                 kvm_s390_deliver_pending_interrupts(vcpu);
1200
1201         rc = kvm_s390_handle_requests(vcpu);
1202         if (rc)
1203                 return rc;
1204
1205         if (guestdbg_enabled(vcpu)) {
1206                 kvm_s390_backup_guest_per_regs(vcpu);
1207                 kvm_s390_patch_guest_per_regs(vcpu);
1208         }
1209
1210         vcpu->arch.sie_block->icptcode = 0;
1211         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1212         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1213         trace_kvm_s390_sie_enter(vcpu, cpuflags);
1214
1215         return 0;
1216 }
1217
1218 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1219 {
1220         int rc = -1;
1221
1222         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1223                    vcpu->arch.sie_block->icptcode);
1224         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1225
1226         if (guestdbg_enabled(vcpu))
1227                 kvm_s390_restore_guest_per_regs(vcpu);
1228
1229         if (exit_reason >= 0) {
1230                 rc = 0;
1231         } else if (kvm_is_ucontrol(vcpu->kvm)) {
1232                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1233                 vcpu->run->s390_ucontrol.trans_exc_code =
1234                                                 current->thread.gmap_addr;
1235                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1236                 rc = -EREMOTE;
1237
1238         } else if (current->thread.gmap_pfault) {
1239                 trace_kvm_s390_major_guest_pfault(vcpu);
1240                 current->thread.gmap_pfault = 0;
1241                 if (kvm_arch_setup_async_pf(vcpu)) {
1242                         rc = 0;
1243                 } else {
1244                         gpa_t gpa = current->thread.gmap_addr;
1245                         rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1246                 }
1247         }
1248
1249         if (rc == -1) {
1250                 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1251                 trace_kvm_s390_sie_fault(vcpu);
1252                 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1253         }
1254
1255         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1256
1257         if (rc == 0) {
1258                 if (kvm_is_ucontrol(vcpu->kvm))
1259                         /* Don't exit for host interrupts. */
1260                         rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1261                 else
1262                         rc = kvm_handle_sie_intercept(vcpu);
1263         }
1264
1265         return rc;
1266 }
1267
1268 static int __vcpu_run(struct kvm_vcpu *vcpu)
1269 {
1270         int rc, exit_reason;
1271
1272         /*
1273          * We try to hold kvm->srcu during most of vcpu_run (except when run-
1274          * ning the guest), so that memslots (and other stuff) are protected
1275          */
1276         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1277
1278         do {
1279                 rc = vcpu_pre_run(vcpu);
1280                 if (rc)
1281                         break;
1282
1283                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1284                 /*
1285                  * As PF_VCPU will be used in fault handler, between
1286                  * guest_enter and guest_exit should be no uaccess.
1287                  */
1288                 preempt_disable();
1289                 kvm_guest_enter();
1290                 preempt_enable();
1291                 exit_reason = sie64a(vcpu->arch.sie_block,
1292                                      vcpu->run->s.regs.gprs);
1293                 kvm_guest_exit();
1294                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1295
1296                 rc = vcpu_post_run(vcpu, exit_reason);
1297         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
1298
1299         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1300         return rc;
1301 }
1302
1303 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1304 {
1305         int rc;
1306         sigset_t sigsaved;
1307
1308         if (guestdbg_exit_pending(vcpu)) {
1309                 kvm_s390_prepare_debug_exit(vcpu);
1310                 return 0;
1311         }
1312
1313         if (vcpu->sigset_active)
1314                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1315
1316         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1317                 kvm_s390_vcpu_start(vcpu);
1318         } else if (is_vcpu_stopped(vcpu)) {
1319                 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1320                                    vcpu->vcpu_id);
1321                 return -EINVAL;
1322         }
1323
1324         switch (kvm_run->exit_reason) {
1325         case KVM_EXIT_S390_SIEIC:
1326         case KVM_EXIT_UNKNOWN:
1327         case KVM_EXIT_INTR:
1328         case KVM_EXIT_S390_RESET:
1329         case KVM_EXIT_S390_UCONTROL:
1330         case KVM_EXIT_S390_TSCH:
1331         case KVM_EXIT_DEBUG:
1332                 break;
1333         default:
1334                 BUG();
1335         }
1336
1337         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1338         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1339         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
1340                 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
1341                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1342         }
1343         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1344                 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
1345                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1346                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1347         }
1348
1349         might_fault();
1350         rc = __vcpu_run(vcpu);
1351
1352         if (signal_pending(current) && !rc) {
1353                 kvm_run->exit_reason = KVM_EXIT_INTR;
1354                 rc = -EINTR;
1355         }
1356
1357         if (guestdbg_exit_pending(vcpu) && !rc)  {
1358                 kvm_s390_prepare_debug_exit(vcpu);
1359                 rc = 0;
1360         }
1361
1362         if (rc == -EOPNOTSUPP) {
1363                 /* intercept cannot be handled in-kernel, prepare kvm-run */
1364                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
1365                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
1366                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
1367                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
1368                 rc = 0;
1369         }
1370
1371         if (rc == -EREMOTE) {
1372                 /* intercept was handled, but userspace support is needed
1373                  * kvm_run has been prepared by the handler */
1374                 rc = 0;
1375         }
1376
1377         kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
1378         kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
1379         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1380         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1381
1382         if (vcpu->sigset_active)
1383                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1384
1385         vcpu->stat.exit_userspace++;
1386         return rc;
1387 }
1388
1389 /*
1390  * store status at address
1391  * we use have two special cases:
1392  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1393  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1394  */
1395 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1396 {
1397         unsigned char archmode = 1;
1398         unsigned int px;
1399         u64 clkcomp;
1400         int rc;
1401
1402         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1403                 if (write_guest_abs(vcpu, 163, &archmode, 1))
1404                         return -EFAULT;
1405                 gpa = SAVE_AREA_BASE;
1406         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1407                 if (write_guest_real(vcpu, 163, &archmode, 1))
1408                         return -EFAULT;
1409                 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1410         }
1411         rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1412                              vcpu->arch.guest_fpregs.fprs, 128);
1413         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1414                               vcpu->run->s.regs.gprs, 128);
1415         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1416                               &vcpu->arch.sie_block->gpsw, 16);
1417         px = kvm_s390_get_prefix(vcpu);
1418         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1419                               &px, 4);
1420         rc |= write_guest_abs(vcpu,
1421                               gpa + offsetof(struct save_area, fp_ctrl_reg),
1422                               &vcpu->arch.guest_fpregs.fpc, 4);
1423         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1424                               &vcpu->arch.sie_block->todpr, 4);
1425         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1426                               &vcpu->arch.sie_block->cputm, 8);
1427         clkcomp = vcpu->arch.sie_block->ckc >> 8;
1428         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1429                               &clkcomp, 8);
1430         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1431                               &vcpu->run->s.regs.acrs, 64);
1432         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1433                               &vcpu->arch.sie_block->gcr, 128);
1434         return rc ? -EFAULT : 0;
1435 }
1436
1437 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1438 {
1439         /*
1440          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1441          * copying in vcpu load/put. Lets update our copies before we save
1442          * it into the save area
1443          */
1444         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1445         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1446         save_access_regs(vcpu->run->s.regs.acrs);
1447
1448         return kvm_s390_store_status_unloaded(vcpu, addr);
1449 }
1450
1451 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1452 {
1453         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1454         kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1455         exit_sie_sync(vcpu);
1456 }
1457
1458 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1459 {
1460         unsigned int i;
1461         struct kvm_vcpu *vcpu;
1462
1463         kvm_for_each_vcpu(i, vcpu, kvm) {
1464                 __disable_ibs_on_vcpu(vcpu);
1465         }
1466 }
1467
1468 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1469 {
1470         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1471         kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1472         exit_sie_sync(vcpu);
1473 }
1474
1475 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1476 {
1477         int i, online_vcpus, started_vcpus = 0;
1478
1479         if (!is_vcpu_stopped(vcpu))
1480                 return;
1481
1482         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
1483         /* Only one cpu at a time may enter/leave the STOPPED state. */
1484         spin_lock(&vcpu->kvm->arch.start_stop_lock);
1485         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1486
1487         for (i = 0; i < online_vcpus; i++) {
1488                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1489                         started_vcpus++;
1490         }
1491
1492         if (started_vcpus == 0) {
1493                 /* we're the only active VCPU -> speed it up */
1494                 __enable_ibs_on_vcpu(vcpu);
1495         } else if (started_vcpus == 1) {
1496                 /*
1497                  * As we are starting a second VCPU, we have to disable
1498                  * the IBS facility on all VCPUs to remove potentially
1499                  * oustanding ENABLE requests.
1500                  */
1501                 __disable_ibs_on_all_vcpus(vcpu->kvm);
1502         }
1503
1504         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1505         /*
1506          * Another VCPU might have used IBS while we were offline.
1507          * Let's play safe and flush the VCPU at startup.
1508          */
1509         vcpu->arch.sie_block->ihcpu  = 0xffff;
1510         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
1511         return;
1512 }
1513
1514 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1515 {
1516         int i, online_vcpus, started_vcpus = 0;
1517         struct kvm_vcpu *started_vcpu = NULL;
1518
1519         if (is_vcpu_stopped(vcpu))
1520                 return;
1521
1522         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
1523         /* Only one cpu at a time may enter/leave the STOPPED state. */
1524         spin_lock(&vcpu->kvm->arch.start_stop_lock);
1525         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1526
1527         /* Need to lock access to action_bits to avoid a SIGP race condition */
1528         spin_lock(&vcpu->arch.local_int.lock);
1529         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1530
1531         /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1532         vcpu->arch.local_int.action_bits &=
1533                                  ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
1534         spin_unlock(&vcpu->arch.local_int.lock);
1535
1536         __disable_ibs_on_vcpu(vcpu);
1537
1538         for (i = 0; i < online_vcpus; i++) {
1539                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1540                         started_vcpus++;
1541                         started_vcpu = vcpu->kvm->vcpus[i];
1542                 }
1543         }
1544
1545         if (started_vcpus == 1) {
1546                 /*
1547                  * As we only have one VCPU left, we want to enable the
1548                  * IBS facility for that VCPU to speed it up.
1549                  */
1550                 __enable_ibs_on_vcpu(started_vcpu);
1551         }
1552
1553         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
1554         return;
1555 }
1556
1557 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1558                                      struct kvm_enable_cap *cap)
1559 {
1560         int r;
1561
1562         if (cap->flags)
1563                 return -EINVAL;
1564
1565         switch (cap->cap) {
1566         case KVM_CAP_S390_CSS_SUPPORT:
1567                 if (!vcpu->kvm->arch.css_support) {
1568                         vcpu->kvm->arch.css_support = 1;
1569                         trace_kvm_s390_enable_css(vcpu->kvm);
1570                 }
1571                 r = 0;
1572                 break;
1573         default:
1574                 r = -EINVAL;
1575                 break;
1576         }
1577         return r;
1578 }
1579
1580 long kvm_arch_vcpu_ioctl(struct file *filp,
1581                          unsigned int ioctl, unsigned long arg)
1582 {
1583         struct kvm_vcpu *vcpu = filp->private_data;
1584         void __user *argp = (void __user *)arg;
1585         int idx;
1586         long r;
1587
1588         switch (ioctl) {
1589         case KVM_S390_INTERRUPT: {
1590                 struct kvm_s390_interrupt s390int;
1591
1592                 r = -EFAULT;
1593                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1594                         break;
1595                 r = kvm_s390_inject_vcpu(vcpu, &s390int);
1596                 break;
1597         }
1598         case KVM_S390_STORE_STATUS:
1599                 idx = srcu_read_lock(&vcpu->kvm->srcu);
1600                 r = kvm_s390_vcpu_store_status(vcpu, arg);
1601                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1602                 break;
1603         case KVM_S390_SET_INITIAL_PSW: {
1604                 psw_t psw;
1605
1606                 r = -EFAULT;
1607                 if (copy_from_user(&psw, argp, sizeof(psw)))
1608                         break;
1609                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1610                 break;
1611         }
1612         case KVM_S390_INITIAL_RESET:
1613                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1614                 break;
1615         case KVM_SET_ONE_REG:
1616         case KVM_GET_ONE_REG: {
1617                 struct kvm_one_reg reg;
1618                 r = -EFAULT;
1619                 if (copy_from_user(&reg, argp, sizeof(reg)))
1620                         break;
1621                 if (ioctl == KVM_SET_ONE_REG)
1622                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1623                 else
1624                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1625                 break;
1626         }
1627 #ifdef CONFIG_KVM_S390_UCONTROL
1628         case KVM_S390_UCAS_MAP: {
1629                 struct kvm_s390_ucas_mapping ucasmap;
1630
1631                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1632                         r = -EFAULT;
1633                         break;
1634                 }
1635
1636                 if (!kvm_is_ucontrol(vcpu->kvm)) {
1637                         r = -EINVAL;
1638                         break;
1639                 }
1640
1641                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1642                                      ucasmap.vcpu_addr, ucasmap.length);
1643                 break;
1644         }
1645         case KVM_S390_UCAS_UNMAP: {
1646                 struct kvm_s390_ucas_mapping ucasmap;
1647
1648                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1649                         r = -EFAULT;
1650                         break;
1651                 }
1652
1653                 if (!kvm_is_ucontrol(vcpu->kvm)) {
1654                         r = -EINVAL;
1655                         break;
1656                 }
1657
1658                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1659                         ucasmap.length);
1660                 break;
1661         }
1662 #endif
1663         case KVM_S390_VCPU_FAULT: {
1664                 r = gmap_fault(arg, vcpu->arch.gmap);
1665                 if (!IS_ERR_VALUE(r))
1666                         r = 0;
1667                 break;
1668         }
1669         case KVM_ENABLE_CAP:
1670         {
1671                 struct kvm_enable_cap cap;
1672                 r = -EFAULT;
1673                 if (copy_from_user(&cap, argp, sizeof(cap)))
1674                         break;
1675                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1676                 break;
1677         }
1678         default:
1679                 r = -ENOTTY;
1680         }
1681         return r;
1682 }
1683
1684 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1685 {
1686 #ifdef CONFIG_KVM_S390_UCONTROL
1687         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1688                  && (kvm_is_ucontrol(vcpu->kvm))) {
1689                 vmf->page = virt_to_page(vcpu->arch.sie_block);
1690                 get_page(vmf->page);
1691                 return 0;
1692         }
1693 #endif
1694         return VM_FAULT_SIGBUS;
1695 }
1696
1697 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1698                            struct kvm_memory_slot *dont)
1699 {
1700 }
1701
1702 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1703                             unsigned long npages)
1704 {
1705         return 0;
1706 }
1707
1708 void kvm_arch_memslots_updated(struct kvm *kvm)
1709 {
1710 }
1711
1712 /* Section: memory related */
1713 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1714                                    struct kvm_memory_slot *memslot,
1715                                    struct kvm_userspace_memory_region *mem,
1716                                    enum kvm_mr_change change)
1717 {
1718         /* A few sanity checks. We can have memory slots which have to be
1719            located/ended at a segment boundary (1MB). The memory in userland is
1720            ok to be fragmented into various different vmas. It is okay to mmap()
1721            and munmap() stuff in this slot after doing this call at any time */
1722
1723         if (mem->userspace_addr & 0xffffful)
1724                 return -EINVAL;
1725
1726         if (mem->memory_size & 0xffffful)
1727                 return -EINVAL;
1728
1729         return 0;
1730 }
1731
1732 void kvm_arch_commit_memory_region(struct kvm *kvm,
1733                                 struct kvm_userspace_memory_region *mem,
1734                                 const struct kvm_memory_slot *old,
1735                                 enum kvm_mr_change change)
1736 {
1737         int rc;
1738
1739         /* If the basics of the memslot do not change, we do not want
1740          * to update the gmap. Every update causes several unnecessary
1741          * segment translation exceptions. This is usually handled just
1742          * fine by the normal fault handler + gmap, but it will also
1743          * cause faults on the prefix page of running guest CPUs.
1744          */
1745         if (old->userspace_addr == mem->userspace_addr &&
1746             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1747             old->npages * PAGE_SIZE == mem->memory_size)
1748                 return;
1749
1750         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1751                 mem->guest_phys_addr, mem->memory_size);
1752         if (rc)
1753                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1754         return;
1755 }
1756
1757 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1758 {
1759 }
1760
1761 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1762                                    struct kvm_memory_slot *slot)
1763 {
1764 }
1765
1766 static int __init kvm_s390_init(void)
1767 {
1768         int ret;
1769         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1770         if (ret)
1771                 return ret;
1772
1773         /*
1774          * guests can ask for up to 255+1 double words, we need a full page
1775          * to hold the maximum amount of facilities. On the other hand, we
1776          * only set facilities that are known to work in KVM.
1777          */
1778         vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1779         if (!vfacilities) {
1780                 kvm_exit();
1781                 return -ENOMEM;
1782         }
1783         memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1784         vfacilities[0] &= 0xff82fff3f4fc2000UL;
1785         vfacilities[1] &= 0x005c000000000000UL;
1786         return 0;
1787 }
1788
1789 static void __exit kvm_s390_exit(void)
1790 {
1791         free_page((unsigned long) vfacilities);
1792         kvm_exit();
1793 }
1794
1795 module_init(kvm_s390_init);
1796 module_exit(kvm_s390_exit);
1797
1798 /*
1799  * Enable autoloading of the kvm module.
1800  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1801  * since x86 takes a different approach.
1802  */
1803 #include <linux/miscdevice.h>
1804 MODULE_ALIAS_MISCDEV(KVM_MINOR);
1805 MODULE_ALIAS("devname:kvm");