/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr

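/*
 * Until patched, the stub above simply returns -1 in r3, i.e.
 * "hypercall unsupported"; the host's real hypercall sequence from
 * the device tree is copied over the nops, as noted above.
 */
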
#define KVM_MAGIC_PAGE		(-4096)

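/*
 * The magic page is mapped into the last 4k of the effective address
 * space, so all of its fields are reachable as negative offsets from
 * register 0 (which reads as literal zero in d-form memory accesses).
 */
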
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif

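/*
 * On 32-bit the shared-page registers are still 64-bit fields; being
 * big-endian, the low word the guest cares about lives at offset + 4.
 */
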
#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

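/*
 * While shared->critical == r1 the host does not inject interrupts,
 * so the scratch slots used above cannot be clobbered by a re-entered
 * template. Everything between kvm_template_start and kvm_template_end
 * is copied out and patched at runtime; it must stay position
 * independent apart from the slots the patcher explicitly fixes up.
 */
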
.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

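	/*
	 * kvm_emulate_mtmsrd_reg below is a template slot: the patcher
	 * rewrites the register field of its "ori" so that r30 picks up
	 * the guest's actual mtmsrd source register.
	 */
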
	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

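/*
 * The _offs/_len words after each template are instruction-word
 * offsets into it, used by the patcher to locate the slots it must
 * fix up (source register, original instruction, return branch) and
 * to know how many words to copy.
 */
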
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4

#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

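/*
 * MSR writes that only touch MSR_SAFE_BITS can be handled entirely in
 * the magic page's MSR copy; a change to any critical bit means the
 * real mtmsr below must be executed.
 */
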
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

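	/*
	 * Even though no critical bits changed, a pending interrupt that
	 * the write just enabled must still reach the host, so the checks
	 * below may bounce us to do_mtmsr anyway.
	 */
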
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	beq	no_mtmsr

	b	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

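	/*
	 * With address translation enabled the SR update has to take
	 * effect immediately, so fall through to the original, trapping
	 * mtsrin rather than just updating the magic page copy.
	 */
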
	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop

	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30,r0,6,26,29

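	/*
	 * That is (rX >> 26) & 0x3c: the segment register number from
	 * the operand's top four bits, pre-scaled by four to index the
	 * magic page's array of 32-bit SR entries.
	 */
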
kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: