/* arch/powerpc/kvm/emulate.c */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_TRAP      4
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_TRAP_64   68
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LD   58
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STD  62
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

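/*
 * Re-arm the emulated decrementer after the guest writes DEC: cancel any
 * pending timer, decide (per the core-family #ifdefs below) whether an
 * interrupt should be queued immediately or not at all, and otherwise
 * start an hrtimer that expires once the programmed number of timebase
 * ticks has elapsed.
 */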
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;
        unsigned long long dec_time;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif

#ifdef CONFIG_BOOKE
        /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
        if (vcpu->arch.dec == 0)
                return;
#endif

        /*
         * The decrementer ticks at the same rate as the timebase, so
         * that's how we convert the guest DEC value to the number of
         * host ticks.
         */

        dec_time = vcpu->arch.dec;
        /*
         * Guest timebase ticks at the same frequency as host decrementer.
         * So use the host decrementer calculations for decrementer emulation.
         */
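        /*
         * A rough equivalent of the next three lines, assuming the standard
         * clockevents convention (cycles = ns * mult >> shift, so inverting
         * gives ns = (cycles << shift) / mult).  Note that do_div() divides
         * its first argument in place and returns the remainder, which is
         * how the result is split into the (seconds, nanoseconds) pair that
         * ktime_set() expects:
         *
         *   total_ns = (guest_dec_ticks << shift) / mult;
         *   dec_nsec = total_ns % NSEC_PER_SEC;
         *   dec_time = total_ns / NSEC_PER_SEC;
         */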
        dec_time = dec_time << decrementer_clockevent.shift;
        do_div(dec_time, decrementer_clockevent.mult);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
        vcpu->arch.dec_jiffies = get_tb();
}

u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;

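        /*
         * jd is the number of timebase ticks elapsed since the guest last
         * wrote DEC.  For example, if the guest programmed DEC = 1000 and
         * 300 ticks have passed, it reads back 700.  On BookE an already
         * expired decrementer is clamped to 0 below rather than being
         * allowed to wrap around to a huge unsigned value.
         */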
#ifdef CONFIG_BOOKE
        if (vcpu->arch.dec < jd)
                return 0;
#endif

        return vcpu->arch.dec - jd;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
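/*
 * The decode helpers used below (get_op, get_xop, get_ra, get_rs, get_rt,
 * get_sprn) come from asm/disassemble.h.  As a rough sketch, assuming the
 * standard Power ISA field layout, they correspond to:
 *
 *   op   = inst >> 26;              // primary opcode, bits 0-5
 *   xop  = (inst >> 1) & 0x3ff;     // extended opcode of X-form (op 31)
 *   rt   = (inst >> 21) & 0x1f;     // RT/RS occupy the same field
 *   ra   = (inst >> 16) & 0x1f;
 *   sprn = ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);  // SPR halves swapped
 *
 * The helpers in asm/disassemble.h are authoritative; this is only an
 * illustration of where the operands used in the switch come from.
 */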
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        int ra = get_ra(inst);
        int rs = get_rs(inst);
        int rt = get_rt(inst);
        int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;
        ulong spr_val = 0;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

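        /*
         * Note on the load/store cases below: kvmppc_handle_load() and
         * kvmppc_handle_store() perform the access, typically by setting up
         * an MMIO exit to userspace, and their result is propagated through
         * 'emulated'.  The "update" forms (lwzu, stbux, ...) additionally
         * write the faulting effective address, which the exit path left in
         * vcpu->arch.vaddr_accessed, back into rA as the architecture
         * requires.
         */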
        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu,
                                          vcpu->arch.shared->esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
                case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                        kvmppc_core_queue_program(vcpu,
                                        vcpu->arch.shared->esr | ESR_PTR);
#endif
                        advance = 0;
                        break;
                case OP_31_XOP_LWZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

                case OP_31_XOP_LBZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_STWX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_LHAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MFSPR:
                        switch (sprn) {
                        case SPRN_SRR0:
                                spr_val = vcpu->arch.shared->srr0;
                                break;
                        case SPRN_SRR1:
                                spr_val = vcpu->arch.shared->srr1;
                                break;
                        case SPRN_PVR:
                                spr_val = vcpu->arch.pvr;
                                break;
                        case SPRN_PIR:
                                spr_val = vcpu->vcpu_id;
                                break;
                        case SPRN_MSSSR0:
                                spr_val = 0;
                                break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyway.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                spr_val = get_tb() >> 32;
                                break;
                        case SPRN_TBWU:
                                spr_val = get_tb();
                                break;

                        case SPRN_SPRG0:
                                spr_val = vcpu->arch.shared->sprg0;
                                break;
                        case SPRN_SPRG1:
                                spr_val = vcpu->arch.shared->sprg1;
                                break;
                        case SPRN_SPRG2:
                                spr_val = vcpu->arch.shared->sprg2;
                                break;
                        case SPRN_SPRG3:
                                spr_val = vcpu->arch.shared->sprg3;
                                break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_DEC:
                                spr_val = kvmppc_get_dec(vcpu, get_tb());
                                break;
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
                                                                     &spr_val);
                                if (unlikely(emulated == EMULATE_FAIL)) {
                                        printk(KERN_INFO "mfspr: unknown spr "
                                                "0x%x\n", sprn);
                                }
                                break;
                        }
                        kvmppc_set_gpr(vcpu, rt, spr_val);
                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;

                case OP_31_XOP_STHX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MTSPR:
                        spr_val = kvmppc_get_gpr(vcpu, rs);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.shared->srr0 = spr_val;
                                break;
                        case SPRN_SRR1:
                                vcpu->arch.shared->srr1 = spr_val;
                                break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = spr_val;
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.shared->sprg0 = spr_val;
                                break;
                        case SPRN_SPRG1:
                                vcpu->arch.shared->sprg1 = spr_val;
                                break;
                        case SPRN_SPRG2:
                                vcpu->arch.shared->sprg2 = spr_val;
                                break;
                        case SPRN_SPRG3:
                                vcpu->arch.shared->sprg3 = spr_val;
                                break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                                                     spr_val);
                                if (emulated == EMULATE_FAIL)
                                        printk(KERN_INFO "mtspr: unknown spr "
                                                "0x%x\n", sprn);
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
        case OP_LD:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                break;

        case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LBZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STW:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
        case OP_STD:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               8, 1);
                break;

        case OP_STWU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STB:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHA:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STH:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

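        /*
         * Anything the generic decoder above couldn't handle gets a second
         * chance in the core-specific hook.  If that also fails, log the
         * instruction and reflect a program interrupt into the guest rather
         * than advancing the PC.
         */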
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}