/* arch/powerpc/kvm/book3s_pr.c */

/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

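/*
 * On 64-bit hosts the low-level entry code works on a shadow vcpu that
 * lives in the PACA, so vcpu_load/vcpu_put copy the shadow SLB and
 * shadow_vcpu state between the book3s vcpu struct and the PACA.
 */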
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
               sizeof(get_paca()->shadow_vcpu));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
               sizeof(get_paca()->shadow_vcpu));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * it makes us 100% sure that we catch the page to be unmapped.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}

/*****************************************/

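/*
 * The guest always runs in problem state, so the MSR we actually load
 * into the hardware (the shadow MSR) keeps the guest-visible bits
 * (FE0/FE1/SF/SE/BE) but forces on the bits the host needs (ME, RI,
 * IR, DR, PR, EE), plus whatever external-provider bits the guest owns.
 */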
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External provider (FPU/Altivec/VSX) bits the guest owns */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

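        /*
         * Setting MSR_POW is the guest's way of napping: if nothing
         * is pending, block the vcpu until it is woken, then clear
         * POW again on its behalf.
         */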
        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around which we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note:
         * we assume that such a transition only happens while in kernel
         * mode, i.e. we never transition from user 32-bit to kernel
         * 64-bit with a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are at hypervisor level on a 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set, so force-disable them
           and hope nobody really needs them in a VM on Cell. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32-bit Book3S always has a 32-byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

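        /*
         * Masking with 0xff0007ff compares only the opcode fields, so
         * any dcbz matches regardless of its register operands, and
         * clearing a low opcode bit (& 0xfffffff7) turns it into a
         * reserved encoding. kvmppc_handle_exit() recognizes exactly
         * this patched form when the resulting program check arrives.
         */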
        /* patch dcbz into a reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}

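/*
 * The magic (paravirt) page is always considered guest-visible, even
 * though it is not backed by a memslot; everything else defers to
 * kvm_is_visible_gfn().
 */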
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

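/*
 * Resolve a guest fault: translate the effective address through the
 * guest MMU (or 1:1 when translation is off), then either map the page
 * into the shadow MMU, reflect the fault back to the guest, or hand
 * the access to userspace as MMIO.
 */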
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
                vcpu->arch.shared->msr |=
                        (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
                svcpu_put(svcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        svcpu->shadow_srr1 & 0x00000000f8000000ULL;
                svcpu_put(svcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                        (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

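/*
 * Fetch the instruction at the guest PC. If the load fails because the
 * page is not mapped, synthesize an instruction storage interrupt for
 * the guest instead.
 */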
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{

        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

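/*
 * guest_owned_ext tracks which facilities currently hold live guest
 * state in the host registers. Loading a facility copies the vcpu's
 * FP/VMX/VSX state into the thread struct and enables the matching
 * MSR bits for the guest.
 */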
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fpscr.val = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
        current->thread.regs->msr |= lost_ext;
}

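/*
 * Main exit dispatcher: called after every guest exit, it either fixes
 * things up behind the guest's back (shadow MMU faults, facility loads,
 * instruction emulation) and resumes, or bounces the event to the guest
 * as an interrupt or out to userspace as an exit reason.
 */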
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                ulong shadow_srr1 = svcpu->shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
                        kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                        r = RESUME_GUEST;
                        svcpu_put(svcpu);
                        break;
                }
#endif
                svcpu_put(svcpu);

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack, we use the NX bit to flush
                         *     and patch the page, so we can't use the NX bit inside
                         *     the guest. Let's cross our fingers that no guest that
                         *     needs the dcbz hack uses NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                u32 fault_dsisr = svcpu->fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
                        kvmppc_mmu_map_segment(vcpu, dar);
                        r = RESUME_GUEST;
                        svcpu_put(svcpu);
                        break;
                }
#endif
                svcpu_put(svcpu);

                /* The only case we need to handle is missing shadow PTEs */
                if (fault_dsisr & DSISR_NOHPTE) {
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                struct kvmppc_book3s_shadow_vcpu *svcpu;
                ulong flags;

program_interrupt:
                svcpu = svcpu_get(vcpu);
                flags = svcpu->shadow_srr1 & 0x1f0000ull;
                svcpu_put(svcpu);

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                ulong shadow_srr1 = svcpu->shadow_srr1;
                svcpu_put(svcpu);
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }
                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_core_check_processor_compat(void)
{
        return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto out;

        vcpu_book3s->shadow_vcpu =
                kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
        if (!vcpu_book3s->shadow_vcpu)
                goto free_vcpu;

        vcpu = &vcpu_book3s->vcpu;
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /* default to book3s_64 (970fx) */
        vcpu->arch.pvr = 0x3C0301;
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
        kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
        vfree(vcpu_book3s);
out:
        return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu_book3s->shadow_vcpu);
        vfree(vcpu_book3s);
}

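/*
 * Run loop entry: the host's FPU/Altivec/VSX context is saved on the
 * stack around __kvmppc_vcpu_run(), because the guest may dirty those
 * registers via kvmppc_handle_ext() while it runs.
 */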
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        double fpr[32][TS_FPRWIDTH];
        unsigned int fpscr;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
                vscr = current->thread.vscr;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
                current->thread.vscr = vscr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

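/*
 * Report the MMU geometry to userspace (KVM_PPC_GET_SMMU_INFO):
 * a 64-entry SLB with 1T segment support, plus the 4k base and
 * 16M large page encodings.
 */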
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
        info->flags = KVM_PPC_1T_SEGMENTS;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;

        return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

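/*
 * PR KVM relies on exceptions vectoring to the 0x0-based real-mode
 * handlers it hooks, so while any VM exists we keep "relocation on
 * exception" disabled machine-wide on pSeries hosts, refcounted via
 * kvm_global_user_count.
 */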
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
                     THIS_MODULE);

        if (r)
                return r;

        r = kvmppc_mmu_hpte_sysinit();

        return r;
}

static void kvmppc_book3s_exit(void)
{
        kvmppc_mmu_hpte_sysexit();
        kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);