2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
/*
 * VCPU_STAT(x): offset of counter 'x' within struct kvm_vcpu's stat
 * member, paired with the KVM_STAT_VCPU tag; used to build the
 * debugfs statistics table below.
 */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Per-vcpu statistics exported through debugfs: one named counter per
 * SIE exit reason, delivered interrupt type, intercepted instruction
 * and SIGP order.
 * NOTE(review): the terminating sentinel entry and closing "};" are
 * not visible in this excerpt.
 */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
50 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
51 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
52 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
53 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
54 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
55 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
56 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
57 { "instruction_spx", VCPU_STAT(instruction_spx) },
58 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
59 { "instruction_stap", VCPU_STAT(instruction_stap) },
60 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
61 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
65 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
66 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
67 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
68 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
69 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
70 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
71 { "diagnose_44", VCPU_STAT(diagnose_44) },
/*
 * Facility bits presented to guests; the backing page is allocated,
 * filled via stfle() and masked in kvm_s390_init() below.
 */
75 static unsigned long long *facilities;
77 /* Section: not file related */
/*
 * Architecture hardware hooks.  On s390 the SIE facility needs no
 * per-cpu enablement, so these callbacks do nothing of substance
 * (their trivial bodies are elided in this excerpt).
 */
78 int kvm_arch_hardware_enable(void *garbage)
80 /* every s390 is virtualization enabled ;-) */
84 void kvm_arch_hardware_disable(void *garbage)
88 int kvm_arch_hardware_setup(void)
93 void kvm_arch_hardware_unsetup(void)
97 void kvm_arch_check_processor_compat(void *rtn)
101 int kvm_arch_init(void *opaque)
106 void kvm_arch_exit(void)
110 /* Section: device related */
/*
 * /dev/kvm device ioctl: only KVM_S390_ENABLE_SIE is handled here;
 * it makes the calling process's address space SIE-capable via
 * s390_enable_sie().  Other ioctls fall through (handling elided).
 */
111 long kvm_arch_dev_ioctl(struct file *filp,
112 unsigned int ioctl, unsigned long arg)
114 if (ioctl == KVM_S390_ENABLE_SIE)
115 return s390_enable_sie();
/*
 * KVM_CHECK_EXTENSION backend: reports which capabilities this
 * architecture supports (at least KVM_CAP_S390_PSW is listed; the
 * remaining cases and return values are elided in this excerpt).
 */
119 int kvm_dev_ioctl_check_extension(long ext)
124 case KVM_CAP_S390_PSW:
133 /* Section: vm related */
135 * Get (and clear) the dirty memory log for a memory slot.
/* NOTE(review): dirty logging body elided; presumably unsupported on
   this arch at this point — confirm against the full source. */
137 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
138 struct kvm_dirty_log *log)
/*
 * VM-level ioctl dispatcher.  The visible case handles
 * KVM_S390_INTERRUPT: copy a struct kvm_s390_interrupt from
 * userspace and inject it as a floating (VM-wide) interrupt via
 * kvm_s390_inject_vm().  Error paths and other cases are elided.
 */
143 long kvm_arch_vm_ioctl(struct file *filp,
144 unsigned int ioctl, unsigned long arg)
146 struct kvm *kvm = filp->private_data;
147 void __user *argp = (void __user *)arg;
151 case KVM_S390_INTERRUPT: {
152 struct kvm_s390_interrupt s390int;
155 if (copy_from_user(&s390int, argp, sizeof(s390int)))
157 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Allocate and initialize the arch-specific VM structure:
 *  - make the current mm SIE-capable (s390_enable_sie)
 *  - allocate struct kvm and the zeroed SCA (system control area) page
 *  - register a per-VM s390 debug-feature log named "kvm-<pid>"
 *  - initialize the floating-interrupt lock and list
 * The error path at the end frees the SCA page; other unwind steps
 * and return statements are elided in this excerpt.
 */
167 struct kvm *kvm_arch_create_vm(void)
173 rc = s390_enable_sie();
178 kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
182 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
186 sprintf(debug_name, "kvm-%u", current->pid);
188 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
192 spin_lock_init(&kvm->arch.float_int.lock);
193 INIT_LIST_HEAD(&kvm->arch.float_int.list);
195 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
196 VM_EVENT(kvm, 3, "%s", "vm created");
200 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: clear this vcpu's SDA slot in the shared SCA
 * (only if it still points at our SIE control block), free the SIE
 * block page and run the generic uninit.
 */
207 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
209 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
210 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
211 (__u64) vcpu->arch.sie_block)
212 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
214 free_page((unsigned long)(vcpu->arch.sie_block));
215 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of a VM, then clear the vcpu pointer array and
 * the online count under kvm->lock so concurrent lookups see a
 * consistent empty set.
 */
219 static void kvm_free_vcpus(struct kvm *kvm)
222 struct kvm_vcpu *vcpu;
224 kvm_for_each_vcpu(i, vcpu, kvm)
225 kvm_arch_vcpu_destroy(vcpu);
227 mutex_lock(&kvm->lock);
228 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
229 kvm->vcpus[i] = NULL;
231 atomic_set(&kvm->online_vcpus, 0);
232 mutex_unlock(&kvm->lock);
/* No arch-specific event syncing needed on s390. */
235 void kvm_arch_sync_events(struct kvm *kvm)
/*
 * VM teardown: release guest memory, the SCA page, the debug-feature
 * log and the srcu state registered at creation time.
 */
239 void kvm_arch_destroy_vm(struct kvm *kvm)
242 kvm_free_physmem(kvm);
243 free_page((unsigned long)(kvm->arch.sca));
244 debug_unregister(kvm->arch.dbf);
245 cleanup_srcu_struct(&kvm->srcu);
249 /* Section: vcpu related */
/* Generic per-vcpu init/uninit hooks (bodies elided in this excerpt). */
250 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
255 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled in: stash the host's floating point and access registers,
 * then install the guest's.  The guest fpc is masked with
 * FPC_VALID_MASK before being loaded so no invalid control bits reach
 * the hardware.
 */
260 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
262 save_fp_regs(&vcpu->arch.host_fpregs);
263 save_access_regs(vcpu->arch.host_acrs);
264 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
265 restore_fp_regs(&vcpu->arch.guest_fpregs);
266 restore_access_regs(vcpu->arch.guest_acrs);
/*
 * Scheduled out: mirror of kvm_arch_vcpu_load — save guest FP/access
 * registers, restore the host's.
 */
269 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
271 save_fp_regs(&vcpu->arch.guest_fpregs);
272 save_access_regs(vcpu->arch.guest_acrs);
273 restore_fp_regs(&vcpu->arch.host_fpregs);
274 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Initial CPU reset: zero the guest PSW, prefix, CPU timer and clock
 * comparator, invalidate the intercept cache (ihcpu = 0xffff), clear
 * all control registers and then set the architected reset values in
 * cr0/cr14.  The guest fpc is cleared and also loaded into the
 * hardware via lfpc.
 * NOTE(review): gbea = 1 presumably resets the breaking-event address
 * register to its architected value — confirm against PoP.
 */
277 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
279 /* this equals initial cpu reset in pop, but we don't switch to ESA */
280 vcpu->arch.sie_block->gpsw.mask = 0UL;
281 vcpu->arch.sie_block->gpsw.addr = 0UL;
282 vcpu->arch.sie_block->prefix = 0UL;
283 vcpu->arch.sie_block->ihcpu = 0xffff;
284 vcpu->arch.sie_block->cputm = 0UL;
285 vcpu->arch.sie_block->ckc = 0UL;
286 vcpu->arch.sie_block->todpr = 0;
287 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
288 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
289 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
290 vcpu->arch.guest_fpregs.fpc = 0;
291 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
292 vcpu->arch.sie_block->gbea = 1;
/*
 * One-time vcpu setup after creation:
 *  - start in z/Architecture mode (CPUSTAT_ZARCH)
 *  - request an MMU reload so the SIE memory limits get programmed
 *    before the first run
 *  - set SIE execution controls (ecb/eca; magic values taken as-is)
 *  - point the SIE facility-list field at the shared 'facilities' page
 *  - set up the clock-comparator hrtimer and its wakeup tasklet
 *  - copy the host CPU id but force version to 0xff
 * NOTE(review): return statement elided in this excerpt.
 */
295 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
297 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
298 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
299 vcpu->arch.sie_block->ecb = 2;
300 vcpu->arch.sie_block->eca = 0xC1002001U;
301 vcpu->arch.sie_block->fac = (int) (long) facilities;
302 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
303 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
304 (unsigned long) vcpu);
305 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
306 get_cpu_id(&vcpu->arch.cpu_id);
307 vcpu->arch.cpu_id.version = 0xff;
311 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
314 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
320 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
321 get_zeroed_page(GFP_KERNEL);
323 if (!vcpu->arch.sie_block)
326 vcpu->arch.sie_block->icpua = id;
327 BUG_ON(!kvm->arch.sca);
328 if (!kvm->arch.sca->cpu[id].sda)
329 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
330 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
331 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
333 spin_lock_init(&vcpu->arch.local_int.lock);
334 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
335 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
336 spin_lock(&kvm->arch.float_int.lock);
337 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
338 init_waitqueue_head(&vcpu->arch.local_int.wq);
339 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
340 spin_unlock(&kvm->arch.float_int.lock);
342 rc = kvm_vcpu_init(vcpu, kvm, id);
344 goto out_free_sie_block;
345 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
346 vcpu->arch.sie_block);
350 free_page((unsigned long)(vcpu->arch.sie_block));
357 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
359 /* kvm common code refers to this, but never calls it */
364 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
367 kvm_s390_vcpu_initial_reset(vcpu);
372 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
375 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
380 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
383 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
/*
 * KVM_SET_SREGS: install guest access registers and control registers
 * from userspace.
 */
388 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
389 struct kvm_sregs *sregs)
392 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
393 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
/*
 * KVM_GET_SREGS: read back guest access and control registers.
 */
398 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
399 struct kvm_sregs *sregs)
402 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
403 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/*
 * KVM_SET_FPU: install guest floating point registers and FP control.
 */
408 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
411 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
412 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
/*
 * KVM_GET_FPU: read back guest floating point state.
 */
417 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
420 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
421 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * KVM_S390_SET_INITIAL_PSW: refuse while the vcpu is running
 * (CPUSTAT_RUNNING set); otherwise store the requested PSW into the
 * shared kvm_run area for the next run.  Error/return lines elided.
 */
426 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
431 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
434 vcpu->run->psw_mask = psw.mask;
435 vcpu->run->psw_addr = psw.addr;
/* The following four vcpu ioctls are not implemented on s390. */
441 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
442 struct kvm_translation *tr)
444 return -EINVAL; /* not implemented yet */
447 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
448 struct kvm_guest_debug *dbg)
450 return -EINVAL; /* not implemented yet */
453 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
454 struct kvm_mp_state *mp_state)
456 return -EINVAL; /* not implemented yet */
459 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
460 struct kvm_mp_state *mp_state)
462 return -EINVAL; /* not implemented yet */
/*
 * Run the vcpu once under SIE:
 *  - copy guest gprs 14/15 (16 bytes) into the SIE block's gg14 area
 *  - bail out to handle a pending machine check (TIF_MCCK_PENDING;
 *    handling elided)
 *  - deliver any pending local/floating interrupts
 *  - clear the intercept code, trace entry, and call sie64a(); a
 *    nonzero return means the SIE instruction itself faulted, which
 *    is reflected to the guest as an addressing program interrupt
 *  - copy gg14 back into the guest gpr array afterwards
 */
465 static void __vcpu_run(struct kvm_vcpu *vcpu)
467 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
472 if (test_thread_flag(TIF_MCCK_PENDING))
475 kvm_s390_deliver_pending_interrupts(vcpu);
477 vcpu->arch.sie_block->icptcode = 0;
481 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
482 atomic_read(&vcpu->arch.sie_block->cpuflags));
483 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
484 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
485 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
487 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
488 vcpu->arch.sie_block->icptcode);
493 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN: main entry for running a vcpu.
 *  - honor a pending KVM_REQ_MMU_RELOAD by reprogramming the SIE
 *    memory limits, and refuse to run if no memory was registered
 *  - swap in the caller's signal mask for the duration of the run
 *  - seed the guest PSW from kvm_run for the exit reasons that carry
 *    one, then loop: __vcpu_run() followed by intercept handling,
 *    until a signal arrives or the handler returns nonzero
 *  - SIE_INTERCEPT_RERUNVCPU restarts the loop (goto elided);
 *    a pending signal maps to KVM_EXIT_INTR;
 *    -EOPNOTSUPP hands the raw intercept (icptcode/ipa/ipb) to
 *    userspace as KVM_EXIT_S390_SIEIC;
 *    -EREMOTE means the handler already prepared kvm_run
 *  - finally publish the guest PSW back into kvm_run, restore the
 *    signal mask and count the userspace exit
 */
496 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
505 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
506 kvm_s390_vcpu_set_mem(vcpu);
508 /* verify, that memory has been registered */
509 if (!vcpu->arch.sie_block->gmslm) {
511 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
515 if (vcpu->sigset_active)
516 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
518 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
520 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
522 switch (kvm_run->exit_reason) {
523 case KVM_EXIT_S390_SIEIC:
524 case KVM_EXIT_UNKNOWN:
526 case KVM_EXIT_S390_RESET:
532 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
533 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
539 rc = kvm_handle_sie_intercept(vcpu);
540 } while (!signal_pending(current) && !rc);
542 if (rc == SIE_INTERCEPT_RERUNVCPU)
545 if (signal_pending(current) && !rc) {
546 kvm_run->exit_reason = KVM_EXIT_INTR;
550 if (rc == -EOPNOTSUPP) {
551 /* intercept cannot be handled in-kernel, prepare kvm-run */
552 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
553 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
554 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
555 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
559 if (rc == -EREMOTE) {
560 /* intercept was handled, but userspace support is needed
561 * kvm_run has been prepared by the handler */
565 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
566 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
568 if (vcpu->sigset_active)
569 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
573 vcpu->stat.exit_userspace++;
/*
 * Copy 'n' bytes from kernel memory to guest memory at 'guestdest':
 * with 'prefix' set, use prefixed (logical) addressing via
 * copy_to_guest(); otherwise copy to the absolute guest address.
 * (Branch structure elided in this excerpt.)
 */
577 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
578 unsigned long n, int prefix)
581 return copy_to_guest(vcpu, guestdest, from, n);
583 return copy_to_guest_absolute(vcpu, guestdest, from, n);
587 * store status at address
588 * we use have two special cases:
589 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
590 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Write the architected store-status save area for this vcpu at
 * 'addr': FP regs, GP regs, PSW, prefix, fpc, TOD programmable reg,
 * CPU timer, clock comparator, access regs and control regs, each at
 * its offset within struct save_area.  The two special address values
 * first store the archmode byte at absolute/prefixed location 163 and
 * then redirect addr to SAVE_AREA_BASE (absolute vs. prefixed copies
 * select the 'prefix' flag passed to __guestcopy; error returns are
 * elided in this excerpt).
 */
592 int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
594 const unsigned char archmode = 1;
597 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
598 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
600 addr = SAVE_AREA_BASE;
602 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
603 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
605 addr = SAVE_AREA_BASE;
610 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
611 vcpu->arch.guest_fpregs.fprs, 128, prefix))
614 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
615 vcpu->arch.guest_gprs, 128, prefix))
618 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
619 &vcpu->arch.sie_block->gpsw, 16, prefix))
622 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
623 &vcpu->arch.sie_block->prefix, 4, prefix))
626 if (__guestcopy(vcpu,
627 addr + offsetof(struct save_area, fp_ctrl_reg),
628 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
631 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
632 &vcpu->arch.sie_block->todpr, 4, prefix))
635 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
636 &vcpu->arch.sie_block->cputm, 8, prefix))
639 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
640 &vcpu->arch.sie_block->ckc, 8, prefix))
643 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
644 &vcpu->arch.guest_acrs, 64, prefix))
647 if (__guestcopy(vcpu,
648 addr + offsetof(struct save_area, ctrl_regs),
649 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * Locked wrapper around __kvm_s390_vcpu_store_status for the
 * KVM_S390_STORE_STATUS ioctl (locking lines elided).
 */
654 static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
659 rc = __kvm_s390_vcpu_store_status(vcpu, addr);
/*
 * Per-vcpu ioctl dispatcher:
 *  KVM_S390_INTERRUPT     - copy a kvm_s390_interrupt from userspace
 *                           and inject it into this vcpu
 *  KVM_S390_STORE_STATUS  - store-status at 'arg'
 *  KVM_S390_SET_INITIAL_PSW - copy a psw_t from userspace and install it
 *  KVM_S390_INITIAL_RESET - initial CPU reset
 * Error returns for failed copy_from_user and the default case are
 * elided in this excerpt.
 */
664 long kvm_arch_vcpu_ioctl(struct file *filp,
665 unsigned int ioctl, unsigned long arg)
667 struct kvm_vcpu *vcpu = filp->private_data;
668 void __user *argp = (void __user *)arg;
671 case KVM_S390_INTERRUPT: {
672 struct kvm_s390_interrupt s390int;
674 if (copy_from_user(&s390int, argp, sizeof(s390int)))
676 return kvm_s390_inject_vcpu(vcpu, &s390int);
678 case KVM_S390_STORE_STATUS:
679 return kvm_s390_vcpu_store_status(vcpu, arg);
680 case KVM_S390_SET_INITIAL_PSW: {
683 if (copy_from_user(&psw, argp, sizeof(psw)))
685 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
687 case KVM_S390_INITIAL_RESET:
688 return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
695 /* Section: memory related */
/*
 * Validate a memory-slot update: s390 allows exactly one slot, which
 * must be guest-physical address 0 and page aligned in both userspace
 * address and size (rejection return values elided).
 */
696 int kvm_arch_prepare_memory_region(struct kvm *kvm,
697 struct kvm_memory_slot *memslot,
698 struct kvm_memory_slot old,
699 struct kvm_userspace_memory_region *mem,
702 /* A few sanity checks. We can have exactly one memory slot which has
703 to start at guest virtual zero and which has to be located at a
704 page boundary in userland and which has to end at a page boundary.
705 The memory in userland is ok to be fragmented into various different
706 vmas. It is okay to mmap() and munmap() stuff in this slot after
707 doing this call at any time */
712 if (mem->guest_phys_addr)
715 if (mem->userspace_addr & (PAGE_SIZE - 1))
718 if (mem->memory_size & (PAGE_SIZE - 1))
/*
 * After a slot change, every vcpu's SIE block must pick up the new
 * memory limits: flag KVM_REQ_MMU_RELOAD on each vcpu and kick
 * running ones with a stop request that reloads them on stop.
 */
727 void kvm_arch_commit_memory_region(struct kvm *kvm,
728 struct kvm_userspace_memory_region *mem,
729 struct kvm_memory_slot old,
733 struct kvm_vcpu *vcpu;
735 /* request update of sie control block for all available vcpus */
736 kvm_for_each_vcpu(i, vcpu, kvm) {
737 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
739 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
/* No shadow page tables on s390; nothing to flush. */
743 void kvm_arch_flush_shadow(struct kvm *kvm)
/* No memory aliasing on s390 (body elided; presumably returns gfn). */
747 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
/*
 * Module init: register with the generic KVM core, then allocate a
 * DMA-able zeroed page for the guest-visible facility list, query the
 * host facilities with stfle() and mask them down to the set known to
 * work under KVM (error unwinding elided).
 */
752 static int __init kvm_s390_init(void)
755 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
760 * guests can ask for up to 255+1 double words, we need a full page
761 * to hold the maximum amount of facilites. On the other hand, we
762 * only set facilities that are known to work in KVM.
764 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
769 stfle(facilities, 1);
770 facilities[0] &= 0xff00fff3f0700000ULL;
/* Module exit: release the facility-list page (kvm_exit call elided). */
774 static void __exit kvm_s390_exit(void)
776 free_page((unsigned long) facilities);
780 module_init(kvm_s390_init);
781 module_exit(kvm_s390_exit);