/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
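
/* Byte offset of the checkpointed (TM) copy of GPR 'reg' within the vcpu struct */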
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
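/* NAPPING_CEDE: napped after the guest vcpu executed H_CEDE;
 * NAPPING_NOVCPU: napped because this hardware thread had no vcpu to run
 */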
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)
/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	stb	r3, HSTATE_HWTHREAD_REQ(r13)
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* if we have no vcpu to run, go back to sleep */

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	std	r6, HSTATE_DSCR(r13)
	/* Back from the guest, go back to nap */

	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
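	/* r6 now holds the offset of this cpu's doubleword within
	   kvm->arch.need_tlb_flush; r7 holds the bit number within it */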
23:	ldarx	r7,0,r6			/* if set, clear the bit */

	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6		/* current host timebase */
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
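	/* TBU40 writes only the upper 40 bits of the timebase (bits 63:24),
	   so the smallest increment it can apply is 1 << 24, i.e. addis
	   with 0x100 */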
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc", "aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK

	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
	stb	r6, VCPU_VPA_DIRTY(r4)
	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
BEGIN_FTR_SECTION_NESTED(89)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7

	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
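	/* MSR[TS] is a 2-bit field: 0 = non-transactional, 1 = suspended,
	   2 = transactional; nonzero means the guest has TM state to restore */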
	beq	skip_tm		/* TM not active in guest */

	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */

skip_tm:
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Skip next section on POWER7 or PPC970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
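	/* the rotate pair parks MSR_HV at the top bit, clears it via the
	   rldicl mask, then rotates it back, avoiding a 64-bit mask constant */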
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)
	/*
	 * Save HEIR (HV emulation assist reg) in last_inst
	 * if this is an HEI (HV emulation interrupt, e40)
	 */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/*
	 * External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now.
	 */
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)

	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
	mfspr	r4, SPRN_MMCR2
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r4, VCPU_MMCR + 24(r9)
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

hdec_soon:			/* r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/*
	 * Increment the threads-exiting-guest count in the 0xff00
	 * bits of vcore->entry_exit_count
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	isync			/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	/* Order entry/exit update vs. IPIs */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
16:	ld	r8,KVM_HOST_LPCR(r4)
	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/*
	 * Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP.
	 */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
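	/* hcall numbers are multiples of 4, matching the 4-byte table
	   entries, so the hcall number itself indexes hcall_real_table;
	   a zero entry means there is no real-mode handler */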
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt

	/*
	 * We've attempted a real mode hcall, but it's been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	.kvmppc_h_get_tce - hcall_real_table
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	.kvmppc_h_cede - hcall_real_table
	.long	.kvmppc_h_bulk_remove - hcall_real_table
	.long	.kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
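	/* the DABR's low-order enable bits are repositioned above into
	   their DAWRX equivalents for the emulated DAWR stored below */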
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR.
	 */
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	rlwinm.	r3, r0, 0, 0xffffff
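	/* the low 24 bits of the XIRR are the XISR interrupt source
	   number; the top byte is the CPPR */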
	beq	1f			/* if nothing pending in the ICP */

	/*
	 * We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */

	/*
	 * We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/*
	 * It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver.
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr