/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	andi.	r0, r0, MSR_IR		/* in real mode? */

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f			/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */
	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8

	/* Virtual-mode return - can't get here for HMI or machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	andi.	r0, r7, MSR_EE		/* were interrupts hard-enabled? */
	mtmsrd	r7, 1			/* if so then re-enable them */

16:	mtspr	SPRN_HSRR0, r8		/* jump to reloc-on external vector */
	b	exc_virt_0x4500_hardware_interrupt

17:	mtspr	SPRN_HSRR0, r8
	b	exc_virt_0x4e80_h_doorbell
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */

	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)

	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
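
/*
 * Illustrative sketch (not the kernel's C code) of the napping protocol
 * used above: the thread publishes its napping bit, then re-checks the
 * vcore's entry/exit word so a concurrent exit cannot be missed.  The
 * names below mirror the asm offsets and are assumptions:
 *
 *	atomic_or(1u << ptid, &vc->napping_threads);
 *	smp_mb();                          // order vs. entry_exit_map test
 *	if (vc->entry_exit_map >= 0x100)   // some thread already exiting
 *		goto kvm_novcpu_exit;
 *	local_paca->kvm_hstate.napping = NAPPING_NOVCPU;
 */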
/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	bl	kvmhv_commence_exit
	b	kvmhv_switch_to_host
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest

	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	std	r0, HSTATE_KVM_VCORE(r13)
	/*
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, just check for an HMI interrupt and
	 * invoke the OPAL HMI handler.  By now we are sure that the
	 * primary thread on this core/subcore has already done the partition
	 * switch/TB resync and we are good to call the OPAL HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
/*
 * When secondaries are napping in kvm_unsplit_nap() with
 * hwthread_req = 1, an HMI goes ignored even though the subcores have
 * already exited the guest.  Hence the HMI keeps waking up secondaries
 * from nap in a loop, and the secondaries always go back to nap since
 * no vcore is assigned to them.  This makes it impossible for the
 * primary thread to get hold of the secondary threads, resulting in a
 * soft lockup in the KVM path.
 *
 * Let us check if an HMI is pending and handle it before we go to nap.
 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)

	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
/* Stack frame offsets */
#define STACK_SLOT_TID		(112-16)
#define STACK_SLOT_PSSCR	(112-24)
#define STACK_SLOT_PID		(112-32)
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRs = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	/* Clear out SLB if hash */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
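
/*
 * Illustrative sketch (not the kernel's C) of the entry/exit map update
 * above: entry bits live in the low byte of vcore->entry_exit_map and
 * exit bits above it, so a single atomic sequence can refuse entry once
 * any thread has started exiting.  load_reserve/store_conditional are
 * stand-ins for the ldarx/stwcx. pair:
 *
 *	do {
 *		m = load_reserve(&vc->entry_exit_map);
 *		if (m >= 0x100)                  // some exit bit already set
 *			goto secondary_too_late;
 *	} while (!store_conditional(&vc->entry_exit_map, m | (1 << ptid)));
 */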
	/* Primary thread switches to guest partition. */

	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
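
/*
 * Illustrative C for the index arithmetic above (a sketch; names mirror
 * the asm offsets): with the cpu number in r6, the code forms the bit
 * number within a doubleword and the byte offset of that doubleword in
 * kvm->arch.need_tlb_flush:
 *
 *	bit    = cpu & 63;                     // clrldi r7,r6,64-6
 *	dword  = cpu >> 6;                     // srdi r6,r6,6
 *	offset = dword * 8 + KVM_NEED_FLUSH;   // sldi + addi
 */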
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	li	r7,0x800		/* IS field = 0b10 */
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
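
/*
 * Sketch of the TBU40 update above (illustrative C; write_tbu40 is a
 * hypothetical helper): writing SPRN_TBU40 replaces only the upper 40
 * bits of the timebase, so if the low 24 bits wrapped between the two
 * mftb reads, the upper field is bumped by one (addis ...,0x100 adds
 * 1 << 24, i.e. one unit of the upper 40 bits):
 *
 *	new_tb = mftb() + vc->tb_offset;
 *	write_tbu40(new_tb);
 *	if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
 *		write_tbu40(new_tb + 0x1000000);
 */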
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)
	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Save host values of some registers */
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)

	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/* Set the decrementer to the guest decrementer. */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
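
/*
 * Sketch (illustrative C, not kernel code): dec_expires is stored as a
 * host timebase value, while DEC counts down in guest time.  By this
 * point the hardware TB already carries vc->tb_offset, so:
 *
 *	guest_expiry = vcpu->arch.dec_expires + vc->tb_offset;
 *	mtspr(SPRN_DEC, guest_expiry - mftb());
 */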
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3, 512		/* 1 microsecond */

deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
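
/*
 * Sketch of the rotate pair above (illustrative C): rotating the MSR so
 * the HV bit becomes the most-significant bit, clearing it via the
 * rldicl mask, then rotating back gives msr & ~MSR_HV without needing a
 * 64-bit mask constant:
 *
 *	r11 = rotl64(msr, 63 - MSR_HV_LG) & ~(1ull << 63);
 *	r11 = rotl64(r11, 1 + MSR_HV_LG);   // total rotation = 64 bits
 */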
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt

/*
 * R10: value for HSRR0
 * R11: value for HSRR1
 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi.	r0, r12, 2	/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
	/*
	 * Save HEIR (HV emulation assist reg) in emul_inst
	 * if this is an HEI (HV emulation interrupt, e40)
	 */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont
	/*
	 * External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now.
	 */

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we check return values <= 0.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */
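
/*
 * Sketch of the dispatch on kvmppc_read_intr()'s return value
 * (illustrative C, not the kernel's):
 *
 *	ret = kvmppc_read_intr();
 *	if (ret > 0) {
 *		if (ret == 2)
 *			trap = BOOK3S_INTERRUPT_HV_RM_HARD;  // finish in host
 *		goto guest_exit_cont;
 *	}
 *	// ret <= 0: reenter the guest, delivering any pending interrupts
 */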
	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)

1:	/* Return code <= 1 */

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lbz	r0, KVM_RADIX(r5)
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
3:	stw	r5,VCPU_SLB_MAX(r9)
	/* Save the guest PURR/SPURR */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	std	r6, VCPU_PSSCR(r9)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
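
/*
 * Sketch of the freeze-before-read workaround above (illustrative C,
 * with mfspr/mtspr as pseudo-intrinsics): MMCR2 freezes the counters
 * without touching MMCR0[PMAO], so a pending alert cannot be lost
 * between reading MMCR0 and reading the counters:
 *
 *	old_mmcr2 = mfspr(SPRN_MMCR2);
 *	mtspr(SPRN_MMCR2, ~0ull);        // freeze all counters via MMCR2
 *	old_mmcr0 = mfspr(SPRN_MMCR0);   // sample MMCR0 with PMAO intact
 *	mtspr(SPRN_MMCR0, MMCR0_FC);     // then the conventional freeze
 */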
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Restore host values of some registers */
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bl	kvmppc_realmode_hmi_handler
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already resynced
	 * the TB, so there is no need to subtract the guest timebase
	 * offset from the timebase; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	bl	kvmppc_subcore_exit_guest

30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)
	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	lbz	r0, KVM_RADIX(r3)
	mfspr	r6, SPRN_HDSISR
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */

	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/*
	 * Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP.
	 */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	std	r5, VCPU_FAULT_GPA(r9)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	lbz	r0, KVM_RADIX(r3)
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
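
/*
 * Sketch of the enablement test computed above (illustrative C): hcall
 * numbers are multiples of 4, so (req / 4) indexes a bitmap of enabled
 * hcalls:
 *
 *	word = (req / 4) >> 6;           // srdi r0,r3,8
 *	bit  = (req / 4) & 0x3f;         // rlwinm r4,r3,32-2,0x3f
 *	if (!(kvm->arch.enabled_hcalls[word] & (1ul << bit)))
 *		goto hcall_real_fallback;   // let the kernel handle it
 */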
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt

	/*
	 * We've attempted a real mode hcall, but the handler has punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
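
/*
 * Sketch of the translation above (illustrative; insert_bits and rotl32
 * are hypothetical helpers matching rlwimi semantics): the DABR
 * read/write enable bits are rotated into the DAWRX DR/DW field, and
 * the translation bit into WT:
 *
 *	dawrx = insert_bits(dawrx, rotl32(dabr, 5), DAWRX_DR | DAWRX_DW);
 *	dawrx = insert_bits(dawrx, rotl32(dabr, 2), DAWRX_WT);
 */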
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ld	r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
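
/*
 * Sketch (illustrative C, with mfspr/mtspr as pseudo-intrinsics): HDEC
 * cannot wake us from nap, so the timeslice deadline is folded into DEC
 * before napping:
 *
 *	if (mfspr(SPRN_HDEC) < mfspr(SPRN_DEC))
 *		mtspr(SPRN_DEC, mfspr(SPRN_HDEC));
 */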
	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence:		/* desired LPCR value in r5 */
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r3 tells us whether we need to return to host or not;
	 * WARNING: it gets checked further down, so do not modify
	 * r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution with the
	 * current HSRR0 instead of exiting the guest.  This approach injects
	 * a machine check into the guest for fatal errors, causing the guest
	 * to crash.
	 *
	 * The old code used to return to the host for unhandled errors, which
	 * caused the guest to hang with soft lockups inside the guest and
	 * made it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI)=0, then deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	cmpdi	r3, 0			/* Did we handle MCE ? */
	bne	2f			/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	li	r3, 1			/* anything else, return 1 */
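
/*
 * Illustrative decode of the SRR1 wake-reason field tested above (a
 * sketch; the handler names are placeholders, not kernel functions):
 *
 *	reason = (srr1 >> 18) & 0xf;       // rlwinm r6,r6,45-31,0xf
 *	if (reason == 8)        handle_external();
 *	else if (reason == 6)   handle_decrementer();
 *	else if (reason == 5)   handle_priv_doorbell();
 *	else if (reason == 3)   handle_hv_doorbell();
 *	else if (reason == 0xa) handle_hmi();
 *	else                    return 1;  // let the host sort it out
 */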
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	/* external interrupt - create a stack frame so we can call C */
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * A return code of 2 means a PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt.  The trap reason is expected in r12 by the guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)

/*
 * Load up FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
	std	r0, PPC_LR_STKOFF(r1)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	/* Save all but r9, r13 & r29-r31 */
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)
	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
	ld	r0, PPC_LR_STKOFF(r1)

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
	std	r0, PPC_LR_STKOFF(r1)
	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call, but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7
	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)

	mtspr	SPRN_DSCR, r29
	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	ld	r0, PPC_LR_STKOFF(r1)
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
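
/*
 * Sketch of the TS adjustment above (illustrative C): interrupt delivery
 * must not leave MSR[TS] = transactional, so a transactional guest is
 * switched to suspended while the rest of the new MSR comes from
 * vcpu->arch.intr_msr:
 *
 *	ts  = (old_msr >> MSR_TS_S_LG) & 3;   // extract the TS field
 *	msr = vcpu->arch.intr_msr;
 *	if (ts == 2)                          // transactional...
 *		msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;  // ...becomes suspended
 */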
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)

	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
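
/*
 * Sketch of the seqcount-protected accumulation above (illustrative C;
 * the field names mirror the TAS_* offsets and are assumptions):
 *
 *	acc->seqcount++;                   // odd: update in progress
 *	smp_wmb();
 *	delta = now - vcpu->arch.cur_activity_start;
 *	acc->total += delta;
 *	if (!acc->min || delta < acc->min)
 *		acc->min = delta;
 *	if (delta > acc->max)
 *		acc->max = delta;
 *	smp_wmb();
 *	acc->seqcount++;                   // even again: consistent
 */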