/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

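/*
 * Per-vcpu event counters exported through debugfs. VCPU_STAT() expands
 * to the offset of the counter within struct kvm_vcpu plus the statistic
 * type, which is what the common KVM debugfs code expects.
 */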
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

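/*
 * Facility list (stfle format, copied from the lowcore) presented to
 * guests; kvm_s390_init() masks it down to the facilities known to work
 * under KVM.
 */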
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
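/*
 * The only device ioctl is KVM_S390_ENABLE_SIE, which prepares the
 * calling process' address space for running guests under the SIE
 * (start interpretive execution) instruction.
 */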
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

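/*
 * VM setup: enable SIE for the host process, allocate the system control
 * area (SCA) that links the vcpus, register the s390 debug feature for
 * tracing, initialize the floating interrupt list and create the gmap
 * guest address space that backs the guest's memory.
 */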
int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

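/*
 * Tear down a vcpu: detach it from the SCA (clear its bit in the mask of
 * configured cpus and its sie block pointer) before freeing the sie block
 * itself.
 */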
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
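/* all vcpus share the VM-wide gmap guest address space */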
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

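/*
 * Lazy register switching: the host's floating point and access registers
 * are saved and the guest's loaded when the vcpu is scheduled in; the
 * reverse happens in kvm_arch_vcpu_put().
 */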
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

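/*
 * One-time setup of the SIE control block: z/Architecture mode,
 * interception controls (ecb/eca), the facility list the guest may use,
 * and the hrtimer plus tasklet that implement clock comparator wakeups.
 */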
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

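/*
 * Run the guest once: deliver pending interrupts, enable the gmap address
 * space, enter SIE via sie64a() and translate a fault on the sie
 * instruction itself into an addressing exception for the guest. gg14 in
 * the sie block shadows guest gprs 14 and 15 across the SIE entry.
 */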
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        gmap_enable(vcpu->arch.gmap);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        gmap_disable(vcpu->arch.gmap);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

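/*
 * Top-level vcpu run loop: reenter SIE until an intercept must be handled
 * in userspace, a signal is pending, or the vcpu has to be rerun after a
 * memory slot update.
 */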
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

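/*
 * Copy into guest storage, either real (honouring the prefix area) or
 * absolute, depending on how the store status address was specified.
 */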
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks: there is exactly one, user-allocated memory
           slot. It must start at guest physical address zero, and its
           userspace address and size must be aligned to a 1 MB segment
           boundary (the 0xfffff masks below). The memory in userland may be
           fragmented into various vmas; it is okay to mmap() and munmap()
           within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i, rc;
        struct kvm_vcpu *vcpu;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                return;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);