/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright Novell Inc 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_fpu.h>
#include <asm/cacheflush.h>
#include <linux/vmalloc.h>

/* #define DEBUG */

#ifdef DEBUG
#define dprintk printk
#else
#define dprintk(...) do { } while(0)
#endif

#define OP_LFS			48
#define OP_LFSU			49
#define OP_LFD			50
#define OP_LFDU			51
#define OP_STFS			52
#define OP_STFSU		53
#define OP_STFD			54
#define OP_STFDU		55
#define OP_PSQ_L		56
#define OP_PSQ_LU		57
#define OP_PSQ_ST		60
#define OP_PSQ_STU		61

#define OP_31_LFSX		535
#define OP_31_LFSUX		567
#define OP_31_LFDX		599
#define OP_31_LFDUX		631
#define OP_31_STFSX		663
#define OP_31_STFSUX		695
#define OP_31_STFX		727
#define OP_31_STFUX		759
#define OP_31_LWIZX		887
#define OP_31_STFIWX		983

#define OP_59_FDIVS		18
#define OP_59_FSUBS		20
#define OP_59_FADDS		21
#define OP_59_FSQRTS		22
#define OP_59_FRES		24
#define OP_59_FMULS		25
#define OP_59_FRSQRTES		26
#define OP_59_FMSUBS		28
#define OP_59_FMADDS		29
#define OP_59_FNMSUBS		30
#define OP_59_FNMADDS		31

#define OP_63_FCMPU		0
#define OP_63_FCPSGN		8
#define OP_63_FRSP		12
#define OP_63_FCTIW		14
#define OP_63_FCTIWZ		15
#define OP_63_FDIV		18
#define OP_63_FSUB		20
#define OP_63_FADD		21
#define OP_63_FSQRT		22
#define OP_63_FSEL		23
#define OP_63_FRE		24
#define OP_63_FMUL		25
#define OP_63_FRSQRTE		26
#define OP_63_FMSUB		28
#define OP_63_FMADD		29
#define OP_63_FNMSUB		30
#define OP_63_FNMADD		31
#define OP_63_FCMPO		32
#define OP_63_MTFSB1		38
#define OP_63_FNEG		40
#define OP_63_MCRFS		64
#define OP_63_MTFSB0		70
#define OP_63_FMR		72
#define OP_63_MTFSFI		134
#define OP_63_FABS		264
#define OP_63_MFFS		583
#define OP_63_MTFSF		711
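
/*
 * Opcode 4 is the paired-single primary opcode (Gekko/Broadway). Its
 * extended opcode comes in three widths, which is why the decoders
 * below switch on three different fields: bits 21-30 for the X forms
 * (OP_4X_*), bits 25-30 for the quantized store-indexed forms
 * (OP_4XW_*) and bits 26-30 for A-form arithmetic (OP_4A_*).
 */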

#define OP_4X_PS_CMPU0		0
#define OP_4X_PSQ_LX		6
#define OP_4XW_PSQ_STX		7
#define OP_4A_PS_SUM0		10
#define OP_4A_PS_SUM1		11
#define OP_4A_PS_MULS0		12
#define OP_4A_PS_MULS1		13
#define OP_4A_PS_MADDS0		14
#define OP_4A_PS_MADDS1		15
#define OP_4A_PS_DIV		18
#define OP_4A_PS_SUB		20
#define OP_4A_PS_ADD		21
#define OP_4A_PS_SEL		23
#define OP_4A_PS_RES		24
#define OP_4A_PS_MUL		25
#define OP_4A_PS_RSQRTE		26
#define OP_4A_PS_MSUB		28
#define OP_4A_PS_MADD		29
#define OP_4A_PS_NMSUB		30
#define OP_4A_PS_NMADD		31
#define OP_4X_PS_CMPO0		32
#define OP_4X_PSQ_LUX		38
#define OP_4XW_PSQ_STUX		39
#define OP_4X_PS_NEG		40
#define OP_4X_PS_CMPU1		64
#define OP_4X_PS_MR		72
#define OP_4X_PS_CMPO1		96
#define OP_4X_PS_NABS		136
#define OP_4X_PS_ABS		264
#define OP_4X_PS_MERGE00	528
#define OP_4X_PS_MERGE01	560
#define OP_4X_PS_MERGE10	592
#define OP_4X_PS_MERGE11	624
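
/*
 * Operand-routing flags for the kvmppc_ps_*_in() helpers below:
 * SCALAR_HIGH/SCALAR_LOW select which half of the second input feeds
 * the scalar variants (ps_muls0/1, ps_madds0/1, ps_sum0/1), while
 * SCALAR_NO_PS0/SCALAR_NO_PS1 suppress the write-back of one half.
 */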

#define SCALAR_NONE		0
#define SCALAR_HIGH		(1 << 0)
#define SCALAR_LOW		(1 << 1)
#define SCALAR_NO_PS0		(1 << 2)
#define SCALAR_NO_PS1		(1 << 3)
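
/*
 * Layout of the Gekko/Broadway Graphics Quantization Registers (GQRs):
 * a 3-bit quantization type and a 6-bit scale, held separately for the
 * load and the store direction.
 */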

#define GQR_ST_TYPE_MASK	0x00000007
#define GQR_ST_TYPE_SHIFT	0
#define GQR_ST_SCALE_MASK	0x00003f00
#define GQR_ST_SCALE_SHIFT	8
#define GQR_LD_TYPE_MASK	0x00070000
#define GQR_LD_TYPE_SHIFT	16
#define GQR_LD_SCALE_MASK	0x3f000000
#define GQR_LD_SCALE_SHIFT	24

#define GQR_QUANTIZE_FLOAT	0
#define GQR_QUANTIZE_U8		4
#define GQR_QUANTIZE_U16	5
#define GQR_QUANTIZE_S8		6
#define GQR_QUANTIZE_S16	7

#define FPU_LS_SINGLE		0
#define FPU_LS_DOUBLE		1
#define FPU_LS_SINGLE_LOW	2
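
/*
 * Mirror fpr[rt] into qpr[rt]: narrow the double held in the FPR to a
 * single, the format ps1 is kept in. Used after single-precision ops
 * so that ps1 observes the same result as ps0.
 */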
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
	struct thread_struct t;

	t.fpscr.val = vcpu->arch.fpscr;
	cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
}
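
/*
 * Raise a data storage interrupt for the guest. In the MSB-0 bit
 * numbering used by kvmppc_set_field(), DSISR bit 33 (0x40000000)
 * means "no translation found" and bit 38 (0x02000000) marks the
 * access as a store.
 */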
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
	u64 dsisr;

	vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
	vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
	vcpu->arch.dear = eaddr;
	/* Page Fault */
	dsisr = kvmppc_set_field(0, 33, 33, 1);
	if (is_store)
		dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
	to_book3s(vcpu)->dsisr = dsisr;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
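
/*
 * Emulate a floating point load (lfs/lfd and their indexed forms):
 * read the value through the guest MMU, widen singles to the internal
 * double format and hand MMIO accesses to kvmppc_handle_load().
 */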
static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	struct thread_struct t;
	int r;
	char tmp[8];
	int len = sizeof(u32);

	if (ls_type == FPU_LS_DOUBLE)
		len = sizeof(u64);

	t.fpscr.val = vcpu->arch.fpscr;

	/* read from memory */
	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;

	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	switch (ls_type) {
	case FPU_LS_SINGLE:
		cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
		vcpu->arch.qpr[rs] = *((u32*)tmp);
		break;
	case FPU_LS_DOUBLE:
		vcpu->arch.fpr[rs] = *((u64*)tmp);
		break;
	}

	dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
			  addr, len);

done_load:
	return emulated;
}
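
/*
 * Emulate a floating point store: narrow the FPR to the requested
 * memory format first. FPU_LS_SINGLE_LOW writes the raw low word of
 * the FPR, which is what stfiwx needs.
 */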
static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	struct thread_struct t;
	int r;
	char tmp[8];
	u64 val;
	int len;

	t.fpscr.val = vcpu->arch.fpscr;

	switch (ls_type) {
	case FPU_LS_SINGLE:
		cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
		val = *((u32*)tmp);
		len = sizeof(u32);
		break;
	case FPU_LS_SINGLE_LOW:
		*((u32*)tmp) = vcpu->arch.fpr[rs];
		val = vcpu->arch.fpr[rs] & 0xffffffff;
		len = sizeof(u32);
		break;
	case FPU_LS_DOUBLE:
		*((u64*)tmp) = vcpu->arch.fpr[rs];
		val = vcpu->arch.fpr[rs];
		len = sizeof(u64);
		break;
	default:
		val = 0;
		len = 0;
		break;
	}

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
	} else {
		emulated = EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
			  val, addr, len);

	return emulated;
}
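
/*
 * Emulate a quantized paired load (psq_l family). With w=1 only ps0 is
 * read and ps1 is set to 1.0f; with w=0 two singles are read back to
 * back. Dequantization is not implemented, so only float-typed GQRs
 * are handled and the GQR index i is unused here.
 */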
static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	struct thread_struct t;
	int r;
	float one = 1.0;
	u32 tmp[2];

	t.fpscr.val = vcpu->arch.fpscr;

	/* read from memory */
	if (w) {
		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
		memcpy(&tmp[1], &one, sizeof(u32));
	} else {
		r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
	}
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if ((r == EMULATE_DO_MMIO) && w) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
		vcpu->arch.qpr[rs] = tmp[1];
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
	vcpu->arch.qpr[rs] = tmp[1];

	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
			  tmp[1], addr, w ? 4 : 8);

done_load:
	return emulated;
}
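
/*
 * Emulate a quantized paired store (psq_st family): ps0 is narrowed
 * from the FPR, ps1 is taken verbatim from the QPR, and w=1 stores
 * only the ps0 word.
 */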
static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	struct thread_struct t;
	int r;
	u32 tmp[2];
	int len = w ? sizeof(u32) : sizeof(u64);

	t.fpscr.val = vcpu->arch.fpscr;

	cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
	tmp[1] = vcpu->arch.qpr[rs];

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if ((r == EMULATE_DO_MMIO) && w) {
		emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
	} else if (r == EMULATE_DO_MMIO) {
		u64 val = ((u64)tmp[0] << 32) | tmp[1];
		emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
	} else {
		emulated = EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
			  tmp[0], tmp[1], addr, len);

	return emulated;
}

/*
 * Cut a field out of an instruction word using the spec's MSB-0 bit
 * numbering: bit 0 is the leftmost bit, and both msb and lsb are
 * included in the result. The +32 accounts for kvmppc_get_field()
 * operating on 64-bit values.
 */
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
	return kvmppc_get_field(inst, msb + 32, lsb + 32);
}

/*
 * Replace a field of an instruction word, with the same MSB-0 bit
 * numbering as above.
 */
static inline u32 inst_set_field(u32 inst, int msb, int lsb,
				 int value)
{
	return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
}
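
/*
 * Tell whether an instruction belongs to the paired-single set we
 * emulate, keyed on the primary opcode and, for opcodes 4, 31, 59 and
 * 63, on the extended opcode. Note the three different extended
 * opcode field widths used for opcode 4.
 */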
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return false;

	switch (get_op(inst)) {
	case OP_PSQ_L:
	case OP_PSQ_LU:
	case OP_PSQ_ST:
	case OP_PSQ_STU:
	case OP_LFS:
	case OP_LFSU:
	case OP_LFD:
	case OP_LFDU:
	case OP_STFS:
	case OP_STFSU:
	case OP_STFD:
	case OP_STFDU:
		return true;
	case 4:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
		case OP_4X_PSQ_LX:
		case OP_4X_PS_CMPO0:
		case OP_4X_PSQ_LUX:
		case OP_4X_PS_NEG:
		case OP_4X_PS_CMPU1:
		case OP_4X_PS_MR:
		case OP_4X_PS_CMPO1:
		case OP_4X_PS_NABS:
		case OP_4X_PS_ABS:
		case OP_4X_PS_MERGE00:
		case OP_4X_PS_MERGE01:
		case OP_4X_PS_MERGE10:
		case OP_4X_PS_MERGE11:
			return true;
		}
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		case OP_4XW_PSQ_STUX:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM0:
		case OP_4A_PS_SUM1:
		case OP_4A_PS_MULS0:
		case OP_4A_PS_MULS1:
		case OP_4A_PS_MADDS0:
		case OP_4A_PS_MADDS1:
		case OP_4A_PS_DIV:
		case OP_4A_PS_SUB:
		case OP_4A_PS_ADD:
		case OP_4A_PS_SEL:
		case OP_4A_PS_RES:
		case OP_4A_PS_MUL:
		case OP_4A_PS_RSQRTE:
		case OP_4A_PS_MSUB:
		case OP_4A_PS_MADD:
		case OP_4A_PS_NMSUB:
		case OP_4A_PS_NMADD:
			return true;
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
		case OP_59_FSUBS:
		case OP_59_FDIVS:
		case OP_59_FRES:
		case OP_59_FRSQRTES:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
		case OP_59_FMSUBS:
		case OP_59_FMADDS:
		case OP_59_FNMSUBS:
		case OP_59_FNMADDS:
			return true;
		}
		break;
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MTFSF:
		case OP_63_MTFSFI:
		case OP_63_MCRFS:
		case OP_63_MFFS:
		case OP_63_FCMPU:
		case OP_63_FCMPO:
		case OP_63_FNEG:
		case OP_63_FMR:
		case OP_63_FABS:
		case OP_63_FCPSGN:
		case OP_63_FRSP:
		case OP_63_FCTIW:
		case OP_63_FCTIWZ:
		case OP_63_FDIV:
		case OP_63_FADD:
		case OP_63_FSUB:
		case OP_63_FRSQRTE:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
		case OP_63_FSEL:
		case OP_63_FMSUB:
		case OP_63_FMADD:
		case OP_63_FNMSUB:
		case OP_63_FNMADD:
			return true;
		}
		break;
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		case OP_31_LFSUX:
		case OP_31_LFDX:
		case OP_31_LFDUX:
		case OP_31_STFSX:
		case OP_31_STFSUX:
		case OP_31_STFX:
		case OP_31_STFUX:
		case OP_31_LWIZX:
		case OP_31_STFIWX:
			return true;
		}
		break;
	}

	return false;
}
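
/*
 * psq_l/psq_st carry a 12-bit signed displacement in the low bits of
 * the instruction; extract and sign-extend it.
 */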
static int get_d_signext(u32 inst)
{
	int d = inst & 0xfff;

	if (d & 0x800)
		return d - 0x1000;

	return d;
}
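
/*
 * The kvmppc_ps_{one,two,three}_in() helpers run a soft-float callback
 * once per vector half: ps0 lives in the FPR as a double and is
 * narrowed first, ps1 lives in the QPR as a raw single. The SCALAR_*
 * flags implement the operand routing of the ps_muls/ps_madds/ps_sum
 * variants. The record (Rc) bit is not emulated.
 */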
static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
			      int reg_out, int reg_in1, int reg_in2,
			      int reg_in3, int scalar,
			      void (*func)(struct thread_struct *t,
					   u32 *dst, u32 *src1,
					   u32 *src2, u32 *src3))
{
	u32 *qpr = vcpu->arch.qpr;
	u64 *fpr = vcpu->arch.fpr;
	u32 ps0_out;
	u32 ps0_in1, ps0_in2, ps0_in3;
	u32 ps1_in1, ps1_in2, ps1_in3;
	struct thread_struct t;
	t.fpscr.val = vcpu->arch.fpscr;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
	cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
	cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);

	if (scalar & SCALAR_LOW)
		ps0_in2 = qpr[reg_in2];

	func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);

	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
			  ps0_in1, ps0_in2, ps0_in3, ps0_out);

	if (!(scalar & SCALAR_NO_PS0))
		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];
	ps1_in3 = qpr[reg_in3];

	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	if (!(scalar & SCALAR_NO_PS1))
		func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);

	return EMULATE_DONE;
}
static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
			    int reg_out, int reg_in1, int reg_in2,
			    int scalar,
			    void (*func)(struct thread_struct *t,
					 u32 *dst, u32 *src1,
					 u32 *src2))
{
	u32 *qpr = vcpu->arch.qpr;
	u64 *fpr = vcpu->arch.fpr;
	u32 ps0_out;
	u32 ps0_in1, ps0_in2;
	u32 ps1_out;
	u32 ps1_in1, ps1_in2;
	struct thread_struct t;
	t.fpscr.val = vcpu->arch.fpscr;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);

	if (scalar & SCALAR_LOW)
		ps0_in2 = qpr[reg_in2];
	else
		cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);

	func(&t, &ps0_out, &ps0_in1, &ps0_in2);

	if (!(scalar & SCALAR_NO_PS0)) {
		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps0_in1, ps0_in2, ps0_out);

		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
	}

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];

	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	func(&t, &ps1_out, &ps1_in1, &ps1_in2);

	if (!(scalar & SCALAR_NO_PS1)) {
		qpr[reg_out] = ps1_out;

		dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps1_in1, ps1_in2, qpr[reg_out]);
	}

	return EMULATE_DONE;
}
static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
			    int reg_out, int reg_in,
			    void (*func)(struct thread_struct *t,
					 u32 *dst, u32 *src1))
{
	u32 *qpr = vcpu->arch.qpr;
	u64 *fpr = vcpu->arch.fpr;
	u32 ps0_out, ps0_in;
	u32 ps1_in;
	struct thread_struct t;
	t.fpscr.val = vcpu->arch.fpscr;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
	func(&t, &ps0_out, &ps0_in);

	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
			  ps0_in, ps0_out);

	cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);

	/* PS1 */
	ps1_in = qpr[reg_in];
	func(&t, &qpr[reg_out], &ps1_in);

	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
			  ps1_in, qpr[reg_out]);

	return EMULATE_DONE;
}
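
/*
 * Main entry point: decode and emulate one paired-single or FPU
 * instruction. Scalar ops are executed by the fpd_* soft-float
 * routines using host FPU state, hence the enable_kernel_fp() dance;
 * CR is only written back when the instruction had the Rc bit set.
 */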
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	enum emulation_result emulated = EMULATE_DONE;

	int ax_rd = inst_get_field(inst, 6, 10);
	int ax_ra = inst_get_field(inst, 11, 15);
	int ax_rb = inst_get_field(inst, 16, 20);
	int ax_rc = inst_get_field(inst, 21, 25);
	short full_d = inst_get_field(inst, 16, 31);

	u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
	u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
	u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
	u64 *fpr_c = &vcpu->arch.fpr[ax_rc];

	bool rcomp = (inst & 1) ? true : false;
	u32 cr = kvmppc_get_cr(vcpu);
	struct thread_struct t;
#ifdef DEBUG
	int i;
#endif

	t.fpscr.val = vcpu->arch.fpscr;

	if (!kvmppc_inst_is_paired_single(vcpu, inst))
		return EMULATE_FAIL;

	if (!(vcpu->arch.msr & MSR_FP)) {
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
		return EMULATE_AGAIN;
	}

	kvmppc_giveup_ext(vcpu, MSR_FP);
	preempt_disable();
	enable_kernel_fp();
	/* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
	}
#endif

	switch (get_op(inst)) {
	case OP_PSQ_L:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_LU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_PSQ_ST:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_STU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 4:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4X_PS_CMPO0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_4X_PS_NEG:
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
			break;
		case OP_4X_PS_CMPU1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_MR:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_CMPO1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_NABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] |= 0x80000000;
			break;
		case OP_4X_PS_ABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
			break;
		case OP_4X_PS_MERGE00:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			cvt_df((double*)&vcpu->arch.fpr[ax_rb],
			       (float*)&vcpu->arch.qpr[ax_rd], &t);
			break;
		case OP_4X_PS_MERGE01:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_MERGE10:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
			       (double*)&vcpu->arch.fpr[ax_rd], &t);
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			cvt_df((double*)&vcpu->arch.fpr[ax_rb],
			       (float*)&vcpu->arch.qpr[ax_rd], &t);
			break;
		case OP_4X_PS_MERGE11:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
			       (double*)&vcpu->arch.fpr[ax_rd], &t);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		}
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4XW_PSQ_STUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		}
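
		/* A form: three-operand paired-single arithmetic */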
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
			break;
		case OP_4A_PS_SUM0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
			break;
		case OP_4A_PS_MULS0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
			break;
		case OP_4A_PS_MULS1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
			break;
		case OP_4A_PS_MADDS0:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
			break;
		case OP_4A_PS_MADDS1:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
			break;
		case OP_4A_PS_DIV:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
			break;
		case OP_4A_PS_SUB:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
			break;
		case OP_4A_PS_ADD:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
			break;
		case OP_4A_PS_SEL:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
			break;
		case OP_4A_PS_RES:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_fres);
			break;
		case OP_4A_PS_MUL:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
			break;
		case OP_4A_PS_RSQRTE:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_frsqrte);
			break;
		case OP_4A_PS_MSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
			break;
		case OP_4A_PS_MADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
			break;
		case OP_4A_PS_NMSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
			break;
		case OP_4A_PS_NMADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
			break;
		}
		break;

	/* Real FPU operations */

	case OP_LFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);
		break;
	}
	case OP_LFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_LFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);
		break;
	}
	case OP_LFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);
		break;
	}
	case OP_STFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);
		break;
	}
	case OP_STFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_LFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_LFDX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_LFDUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFSX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_STFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_STFUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFIWX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE_LOW);
			break;
		}
		}
		break;
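	/*
	 * Opcode 59: single-precision arithmetic, carried out in double
	 * precision by the fpd_* soft-float routines and then mirrored
	 * into ps1 via kvmppc_sync_qpr().
	 */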
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
			fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FSUBS:
			fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FDIVS:
			fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRES:
			fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRSQRTES:
			fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
			fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMSUBS:
			fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMADDS:
			fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMSUBS:
			fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMADDS:
			fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		break;
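	/*
	 * Opcode 63: double-precision arithmetic and FPSCR/CR ops;
	 * frsqrte is approximated as 1.0 / sqrt(fB).
	 */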
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MCRFS:
		case OP_63_MTFSFI:
			/* XXX need to implement */
			break;
		case OP_63_MFFS:
			/* XXX missing CR */
			*fpr_d = vcpu->arch.fpscr;
			break;
		case OP_63_MTFSF:
			/* XXX missing fm bits */
			/* XXX missing CR */
			vcpu->arch.fpscr = *fpr_b;
			break;
		case OP_63_FCMPU:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FCMPO:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FNEG:
			fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FMR:
			*fpr_d = *fpr_b;
			break;
		case OP_63_FABS:
			fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCPSGN:
			fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FDIV:
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FADD:
			fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FSUB:
			fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FCTIW:
			fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCTIWZ:
			fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FRSP:
			fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_63_FRSQRTE:
		{
			double one = 1.0;

			/* fD = sqrt(fB) */
			fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			/* fD = 1.0f / fD */
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
			break;
		}
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
			fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			break;
		case OP_63_FSEL:
			fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMSUB:
			fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMADD:
			fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMSUB:
			fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMADD:
			fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		}
		break;
	}

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
	}
#endif

	if (rcomp)
		kvmppc_set_cr(vcpu, cr);

	preempt_enable();

	return emulated;
}