/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
        li      r3, -1
        nop
        nop
        nop
        blr
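
/*
 * The li/nop/nop/nop sequence above is a template: at boot the guest
 * copies the hypercall instructions the hypervisor advertises in the
 * device tree over the nops. If nothing ever patches the sequence,
 * r3 = -1 is returned as the error code.
 */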

#define KVM_MAGIC_PAGE          (-4096)
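
/*
 * The magic page is mapped into the last 4096 bytes of the effective
 * address space. All accesses below use (0) as the base, which the
 * hardware reads as the literal value zero, so the negative offset
 * alone addresses the page and no register has to be reserved for it.
 */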

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld      reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std     reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz     reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw     reg, (offs + 4)(reg2)
#endif
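
/*
 * The shared fields stay 64 bits wide even for a 32-bit guest;
 * offs + 4 selects the low word of the big-endian value, which is
 * all a 32-bit kernel needs.
 */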

#define SCRATCH_SAVE                                                    \
        /* Enable critical section. We are critical if                 \
           shared->critical == r1 */                                   \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);             \
                                                                        \
        /* Save state */                                                \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);         \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);         \
        mfcr    r31;                                                    \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE                                                 \
        /* Restore state */                                             \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);         \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);         \
        mtcr    r30;                                                    \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);         \
                                                                        \
        /* Disable critical section. We are critical if                \
           shared->critical == r1 and r2 is always != r1 */            \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
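
/*
 * While r30/r31/CR live in the magic page's scratch slots, the
 * hypervisor must not inject an interrupt, or the handler would see
 * half-swapped state. Writing r1 (the stack pointer) to
 * shared->critical marks the section; writing r2, which can never
 * equal r1, ends it. The host compares critical against the guest's
 * r1 before delivering anything.
 */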

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

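/*
 * r0 in the instruction below is only a placeholder: when this
 * template is copied out at boot, the patcher rewrites the register
 * field with the guest instruction's real source register, located
 * via the kvm_emulate_mtmsrd_reg_offs word at the end of the
 * template.
 */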
        /* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
        andi.   r30, r0, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put MSR back into magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor: tlbsync is privileged, so it traps into
           the host, which can then deliver the pending interrupt */
        tlbsync

        b       kvm_emulate_mtmsrd_branch

no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
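
/*
 * The *_offs and *_len words are instruction (4-byte word) offsets
 * into the template above. The boot-time patcher copies the template
 * out, uses the _reg/_branch offsets to rewrite the placeholder
 * instructions, and uses _len to know how much to copy. The "b ." at
 * _branch is rewritten into a branch back to the instruction
 * following the original mtmsrd.
 */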


#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
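
/*
 * EE, CE, ME and RI only affect interrupt delivery, so flipping them
 * can be handled entirely through the magic page. A change to any
 * other MSR bit (address translation, problem state, ...) has to
 * reach the hypervisor through a real mtmsr.
 */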

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
        xor     r31, r0, r31

        /* Check if we need to really do mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0      /* patched with the guest's original mtmsr */

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
kvm_emulate_mtmsr_reg2:
        andi.   r31, r0, MSR_EE
        beq     no_mtmsr

        b       do_mtmsr

no_mtmsr:

        /* Put MSR into magic page because we don't call mtmsr */
kvm_emulate_mtmsr_reg3:
        STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg3_offs
kvm_emulate_mtmsr_reg3_offs:
        .long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4


.global kvm_emulate_wrteei
kvm_emulate_wrteei:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        li      r30, 0
        ori     r30, r30, MSR_EE
        andc    r31, r31, r30

        /* OR new MSR_EE onto the old MSR */
kvm_emulate_wrteei_ee:
        ori     r31, r31, 0
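
/*
 * The zero immediate in the ori above is patched with the E bit of
 * the guest's original wrteei, so the instruction either sets MSR_EE
 * or leaves it cleared.
 */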

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_branch:
        b       .
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
        .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
        .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
        .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4


.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

        SCRATCH_SAVE

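/*
 * mtsrin only needs hypervisor help while address translation is
 * live. With MSR_DR/MSR_IR clear (real mode) the segment register
 * write can simply be logged in the magic page's SR array; otherwise
 * the original, trapping instruction is executed.
 */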
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop     /* patched with the guest's original mtsrin */
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* Compute (rX >> 26) & ~3: the SR number lives in the top
           four bits of rX, and each of the 16 segment registers in
           the magic page is four bytes wide */
        rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4