1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
32 * Opcode effective-address decode tables.
33 * Note that we only emulate instructions that have at least one memory
34 * operand (excluding implicit stack references). We assume that stack
35 * references and instruction fetches will never occur in special memory
36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
40 /* Operand sizes: 8-bit operands or specified/overridden size. */
41 #define ByteOp (1<<0) /* 8-bit operands. */
42 /* Destination operand type. */
43 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
44 #define DstReg (2<<1) /* Register operand. */
45 #define DstMem (3<<1) /* Memory operand. */
46 #define DstAcc (4<<1) /* Destination Accumulator */
47 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
48 #define DstMem64 (6<<1) /* 64bit memory operand */
49 #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50 #define DstMask (7<<1)
51 /* Source operand type. */
52 #define SrcNone (0<<4) /* No source operand. */
53 #define SrcReg (1<<4) /* Register operand. */
54 #define SrcMem (2<<4) /* Memory operand. */
55 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
56 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
57 #define SrcImm (5<<4) /* Immediate operand. */
58 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
59 #define SrcOne (7<<4) /* Implied '1' */
60 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
61 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
62 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
63 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
64 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
65 #define SrcAcc (0xd<<4) /* Source Accumulator */
66 #define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
67 #define SrcMask (0xf<<4)
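/*
 * Example decode: a flags word of (ByteOp | DstMem | SrcImmByte) describes
 * an 8-bit operation whose destination is memory and whose source is a
 * sign-extended 8-bit immediate; the fields are recovered with
 * (flags & DstMask) and (flags & SrcMask).
 */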
68 /* Generic ModRM decode. */
70 /* Destination is only written; never read. */
73 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
74 #define String (1<<12) /* String instruction (rep capable) */
75 #define Stack (1<<13) /* Stack instruction (push/pop) */
76 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
77 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
78 #define Prefix (1<<16) /* Instruction varies with 66/f2/f3 prefix */
79 #define Sse (1<<17) /* SSE Vector instruction */
81 #define VendorSpecific (1<<22) /* Vendor specific instruction */
82 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
83 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
84 #define Undefined (1<<25) /* No Such Instruction */
85 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
86 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
88 /* Source 2 operand type */
89 #define Src2None (0<<29)
90 #define Src2CL (1<<29)
91 #define Src2ImmByte (2<<29)
92 #define Src2One (3<<29)
93 #define Src2Imm (4<<29)
94 #define Src2Mask (7<<29)
97 #define X3(x...) X2(x), x
98 #define X4(x...) X2(x), X2(x)
99 #define X5(x...) X4(x), x
100 #define X6(x...) X4(x), X2(x)
101 #define X7(x...) X4(x), X3(x)
102 #define X8(x...) X4(x), X4(x)
103 #define X16(x...) X8(x), X8(x)
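/*
 * These repetition macros stamp out runs of identical table entries, e.g.
 * X4(D(SrcMem | ModRM)) expands to four copies of D(SrcMem | ModRM) and is
 * used below to fill the mul/imul/div/idiv slots of group3 in one line.
 */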
108 int (*execute)(struct x86_emulate_ctxt *ctxt);
109 struct opcode *group;
110 struct group_dual *gdual;
111 struct gprefix *gprefix;
116 struct opcode mod012[8];
117 struct opcode mod3[8];
121 struct opcode pfx_no;
122 struct opcode pfx_66;
123 struct opcode pfx_f2;
124 struct opcode pfx_f3;
127 /* EFLAGS bit definitions. */
128 #define EFLG_ID (1<<21)
129 #define EFLG_VIP (1<<20)
130 #define EFLG_VIF (1<<19)
131 #define EFLG_AC (1<<18)
132 #define EFLG_VM (1<<17)
133 #define EFLG_RF (1<<16)
134 #define EFLG_IOPL (3<<12)
135 #define EFLG_NT (1<<14)
136 #define EFLG_OF (1<<11)
137 #define EFLG_DF (1<<10)
138 #define EFLG_IF (1<<9)
139 #define EFLG_TF (1<<8)
140 #define EFLG_SF (1<<7)
141 #define EFLG_ZF (1<<6)
142 #define EFLG_AF (1<<4)
143 #define EFLG_PF (1<<2)
144 #define EFLG_CF (1<<0)
146 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
147 #define EFLG_RESERVED_ONE_MASK 2
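/*
 * Bit 1 of EFLAGS always reads as one. The iret path below first clears
 * everything in EFLG_RESERVED_ZEROS_MASK and then ORs in
 * EFLG_RESERVED_ONE_MASK, so the reserved bits end up architecturally
 * correct regardless of what was popped.
 */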
150 * Instruction emulation:
151 * Most instructions are emulated directly via a fragment of inline assembly
152 * code. This allows us to save/restore EFLAGS and thus very easily pick up
153 * any modified flags.
156 #if defined(CONFIG_X86_64)
157 #define _LO32 "k" /* force 32-bit operand */
158 #define _STK "%%rsp" /* stack pointer */
159 #elif defined(__i386__)
160 #define _LO32 "" /* force 32-bit operand */
161 #define _STK "%%esp" /* stack pointer */
165 * These EFLAGS bits are restored from saved value during emulation, and
166 * any changes are written back to the saved value after emulation.
168 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
170 /* Before executing instruction: restore necessary bits in EFLAGS. */
171 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
172 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
173 "movl %"_sav",%"_LO32 _tmp"; " \
176 "movl %"_msk",%"_LO32 _tmp"; " \
177 "andl %"_LO32 _tmp",("_STK"); " \
179 "notl %"_LO32 _tmp"; " \
180 "andl %"_LO32 _tmp",("_STK"); " \
181 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
183 "orl %"_LO32 _tmp",("_STK"); " \
187 /* After executing instruction: write-back necessary bits in EFLAGS. */
188 #define _POST_EFLAGS(_sav, _msk, _tmp) \
189 /* _sav |= EFLAGS & _msk; */ \
192 "andl %"_msk",%"_LO32 _tmp"; " \
193 "orl %"_LO32 _tmp",%"_sav"; "
201 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
203 __asm__ __volatile__ ( \
204 _PRE_EFLAGS("0", "4", "2") \
205 _op _suffix " %"_x"3,%1; " \
206 _POST_EFLAGS("0", "4", "2") \
207 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
209 : _y ((_src).val), "i" (EFLAGS_MASK)); \
213 /* Raw emulation: instruction has two explicit operands. */
214 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
216 unsigned long _tmp; \
218 switch ((_dst).bytes) { \
220 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
223 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
226 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
231 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
233 unsigned long _tmp; \
234 switch ((_dst).bytes) { \
236 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
239 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
240 _wx, _wy, _lx, _ly, _qx, _qy); \
245 /* Source operand is byte-sized and may be restricted to just %cl. */
246 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
247 __emulate_2op(_op, _src, _dst, _eflags, \
248 "b", "c", "b", "c", "b", "c", "b", "c")
250 /* Source operand is byte, word, long or quad sized. */
251 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
252 __emulate_2op(_op, _src, _dst, _eflags, \
253 "b", "q", "w", "r", _LO32, "r", "", "r")
255 /* Source operand is word, long or quad sized. */
256 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
257 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
258 "w", "r", _LO32, "r", "", "r")
260 /* Instruction has three operands and one operand is stored in ECX register */
261 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
263 unsigned long _tmp; \
264 _type _clv = (_cl).val; \
265 _type _srcv = (_src).val; \
266 _type _dstv = (_dst).val; \
268 __asm__ __volatile__ ( \
269 _PRE_EFLAGS("0", "5", "2") \
270 _op _suffix " %4,%1 \n" \
271 _POST_EFLAGS("0", "5", "2") \
272 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
273 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
276 (_cl).val = (unsigned long) _clv; \
277 (_src).val = (unsigned long) _srcv; \
278 (_dst).val = (unsigned long) _dstv; \
281 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
283 switch ((_dst).bytes) { \
285 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
286 "w", unsigned short); \
289 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
290 "l", unsigned int); \
293 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
294 "q", unsigned long)); \
299 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
301 unsigned long _tmp; \
303 __asm__ __volatile__ ( \
304 _PRE_EFLAGS("0", "3", "2") \
305 _op _suffix " %1; " \
306 _POST_EFLAGS("0", "3", "2") \
307 : "=m" (_eflags), "+m" ((_dst).val), \
309 : "i" (EFLAGS_MASK)); \
312 /* Instruction has only one explicit operand (no source operand). */
313 #define emulate_1op(_op, _dst, _eflags) \
315 switch ((_dst).bytes) { \
316 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
317 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
318 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
319 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
323 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
325 unsigned long _tmp; \
327 __asm__ __volatile__ ( \
328 _PRE_EFLAGS("0", "4", "1") \
329 _op _suffix " %5; " \
330 _POST_EFLAGS("0", "4", "1") \
331 : "=m" (_eflags), "=&r" (_tmp), \
332 "+a" (_rax), "+d" (_rdx) \
333 : "i" (EFLAGS_MASK), "m" ((_src).val), \
334 "a" (_rax), "d" (_rdx)); \
337 #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
339 unsigned long _tmp; \
341 __asm__ __volatile__ ( \
342 _PRE_EFLAGS("0", "5", "1") \
344 _op _suffix " %6; " \
346 _POST_EFLAGS("0", "5", "1") \
347 ".pushsection .fixup,\"ax\" \n\t" \
348 "3: movb $1, %4 \n\t" \
351 _ASM_EXTABLE(1b, 3b) \
352 : "=m" (_eflags), "=&r" (_tmp), \
353 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
354 : "i" (EFLAGS_MASK), "m" ((_src).val), \
355 "a" (_rax), "d" (_rdx)); \
358 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
359 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
361 switch((_src).bytes) { \
362 case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
363 case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
364 case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
365 case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
369 #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
371 switch((_src).bytes) { \
373 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
374 _eflags, "b", _ex); \
377 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
378 _eflags, "w", _ex); \
381 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
382 _eflags, "l", _ex); \
385 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
386 _eflags, "q", _ex)); \
391 /* Fetch next part of the instruction being emulated. */
392 #define insn_fetch(_type, _size, _eip) \
393 ({ unsigned long _x; \
394 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
395 if (rc != X86EMUL_CONTINUE) \
401 #define insn_fetch_arr(_arr, _size, _eip) \
402 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
403 if (rc != X86EMUL_CONTINUE) \
408 static inline unsigned long ad_mask(struct decode_cache *c)
410 return (1UL << (c->ad_bytes << 3)) - 1;
413 /* Access/update address held in a register, based on addressing mode. */
414 static inline unsigned long
415 address_mask(struct decode_cache *c, unsigned long reg)
417 if (c->ad_bytes == sizeof(unsigned long))
420 return reg & ad_mask(c);
423 static inline unsigned long
424 register_address(struct decode_cache *c, unsigned long reg)
426 return address_mask(c, reg);
430 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
432 if (c->ad_bytes == sizeof(unsigned long))
435 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
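/*
 * Worked example: with ad_bytes == 2 the mask is 0xffff, so incrementing
 * *reg == 0x0001ffff by 1 yields 0x00010000 -- only the low 16 bits wrap,
 * matching 16-bit address arithmetic.
 */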
438 static inline void jmp_rel(struct decode_cache *c, int rel)
440 register_address_increment(c, &c->eip, rel);
443 static void set_seg_override(struct decode_cache *c, int seg)
445 c->has_seg_override = true;
446 c->seg_override = seg;
449 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
450 struct x86_emulate_ops *ops, int seg)
452 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
455 return ops->get_cached_segment_base(seg, ctxt->vcpu);
458 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
459 struct x86_emulate_ops *ops,
460 struct decode_cache *c)
462 if (!c->has_seg_override)
465 return c->seg_override;
468 static ulong linear(struct x86_emulate_ctxt *ctxt,
469 struct segmented_address addr)
471 struct decode_cache *c = &ctxt->decode;
474 la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
475 if (c->ad_bytes != 8)
480 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
481 u32 error, bool valid)
483 ctxt->exception.vector = vec;
484 ctxt->exception.error_code = error;
485 ctxt->exception.error_code_valid = valid;
486 return X86EMUL_PROPAGATE_FAULT;
489 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
491 return emulate_exception(ctxt, GP_VECTOR, err, true);
494 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
496 return emulate_exception(ctxt, UD_VECTOR, 0, false);
499 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
501 return emulate_exception(ctxt, TS_VECTOR, err, true);
504 static int emulate_de(struct x86_emulate_ctxt *ctxt)
506 return emulate_exception(ctxt, DE_VECTOR, 0, false);
509 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
511 return emulate_exception(ctxt, NM_VECTOR, 0, false);
514 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
515 struct x86_emulate_ops *ops,
516 unsigned long eip, u8 *dest)
518 struct fetch_cache *fc = &ctxt->decode.fetch;
522 if (eip == fc->end) {
523 cur_size = fc->end - fc->start;
524 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
525 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
526 size, ctxt->vcpu, &ctxt->exception);
527 if (rc != X86EMUL_CONTINUE)
531 *dest = fc->data[eip - fc->start];
532 return X86EMUL_CONTINUE;
535 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
536 struct x86_emulate_ops *ops,
537 unsigned long eip, void *dest, unsigned size)
541 /* x86 instructions are limited to 15 bytes. */
542 if (eip + size - ctxt->eip > 15)
543 return X86EMUL_UNHANDLEABLE;
545 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
546 if (rc != X86EMUL_CONTINUE)
549 return X86EMUL_CONTINUE;
553 * Given the 'reg' portion of a ModRM byte, and a register block, return a
554 * pointer into the block that addresses the relevant register.
555 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
557 static void *decode_register(u8 modrm_reg, unsigned long *regs,
562 p = &regs[modrm_reg];
563 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
564 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
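/* e.g. with highbyte_regs, modrm_reg == 4 (AH) yields byte 1 of RAX */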
568 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
569 struct x86_emulate_ops *ops,
570 struct segmented_address addr,
571 u16 *size, unsigned long *address, int op_bytes)
578 rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
579 ctxt->vcpu, &ctxt->exception);
580 if (rc != X86EMUL_CONTINUE)
583 rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
584 ctxt->vcpu, &ctxt->exception);
588 static int test_cc(unsigned int condition, unsigned int flags)
592 switch ((condition & 15) >> 1) {
594 rc |= (flags & EFLG_OF);
596 case 1: /* b/c/nae */
597 rc |= (flags & EFLG_CF);
600 rc |= (flags & EFLG_ZF);
603 rc |= (flags & (EFLG_CF|EFLG_ZF));
606 rc |= (flags & EFLG_SF);
609 rc |= (flags & EFLG_PF);
612 rc |= (flags & EFLG_ZF);
615 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
619 /* Odd condition identifiers (lsb == 1) have inverted sense. */
620 return (!!rc ^ (condition & 1));
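/*
 * Example: condition 0x4 ("e"/"z") selects the ZF test above; condition
 * 0x5 ("ne"/"nz") is the same test with the low bit inverting the result.
 */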
623 static void fetch_register_operand(struct operand *op)
627 op->val = *(u8 *)op->addr.reg;
630 op->val = *(u16 *)op->addr.reg;
633 op->val = *(u32 *)op->addr.reg;
636 op->val = *(u64 *)op->addr.reg;
641 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
643 ctxt->ops->get_fpu(ctxt);
645 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
646 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
647 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
648 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
649 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
650 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
651 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
652 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
654 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
655 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
656 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
657 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
658 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
659 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
660 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
661 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
665 ctxt->ops->put_fpu(ctxt);
668 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
671 ctxt->ops->get_fpu(ctxt);
673 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
674 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
675 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
676 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
677 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
678 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
679 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
680 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
682 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
683 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
684 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
685 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
686 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
687 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
688 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
689 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
693 ctxt->ops->put_fpu(ctxt);
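/*
 * The switch statements above exist because the xmm register name must
 * appear literally in the asm template; it cannot be a runtime parameter.
 * get_fpu()/put_fpu() make the guest's FPU/SSE state resident around the
 * access.
 */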
696 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
698 struct decode_cache *c,
701 unsigned reg = c->modrm_reg;
702 int highbyte_regs = c->rex_prefix == 0;
705 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
711 read_sse_reg(ctxt, &op->vec_val, reg);
716 if ((c->d & ByteOp) && !inhibit_bytereg) {
717 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
720 op->addr.reg = decode_register(reg, c->regs, 0);
721 op->bytes = c->op_bytes;
723 fetch_register_operand(op);
724 op->orig_val = op->val;
727 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
728 struct x86_emulate_ops *ops,
731 struct decode_cache *c = &ctxt->decode;
733 int index_reg = 0, base_reg = 0, scale;
734 int rc = X86EMUL_CONTINUE;
738 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
739 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
740 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
743 c->modrm = insn_fetch(u8, 1, c->eip);
744 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
745 c->modrm_reg |= (c->modrm & 0x38) >> 3;
746 c->modrm_rm |= (c->modrm & 0x07);
747 c->modrm_seg = VCPU_SREG_DS;
749 if (c->modrm_mod == 3) {
751 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
752 op->addr.reg = decode_register(c->modrm_rm,
753 c->regs, c->d & ByteOp);
757 op->addr.xmm = c->modrm_rm;
758 read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
761 fetch_register_operand(op);
767 if (c->ad_bytes == 2) {
768 unsigned bx = c->regs[VCPU_REGS_RBX];
769 unsigned bp = c->regs[VCPU_REGS_RBP];
770 unsigned si = c->regs[VCPU_REGS_RSI];
771 unsigned di = c->regs[VCPU_REGS_RDI];
773 /* 16-bit ModR/M decode. */
774 switch (c->modrm_mod) {
776 if (c->modrm_rm == 6)
777 modrm_ea += insn_fetch(u16, 2, c->eip);
780 modrm_ea += insn_fetch(s8, 1, c->eip);
783 modrm_ea += insn_fetch(u16, 2, c->eip);
786 switch (c->modrm_rm) {
806 if (c->modrm_mod != 0)
813 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
814 (c->modrm_rm == 6 && c->modrm_mod != 0))
815 c->modrm_seg = VCPU_SREG_SS;
816 modrm_ea = (u16)modrm_ea;
818 /* 32/64-bit ModR/M decode. */
819 if ((c->modrm_rm & 7) == 4) {
820 sib = insn_fetch(u8, 1, c->eip);
821 index_reg |= (sib >> 3) & 7;
825 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
826 modrm_ea += insn_fetch(s32, 4, c->eip);
828 modrm_ea += c->regs[base_reg];
830 modrm_ea += c->regs[index_reg] << scale;
831 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
832 if (ctxt->mode == X86EMUL_MODE_PROT64)
835 modrm_ea += c->regs[c->modrm_rm];
836 switch (c->modrm_mod) {
838 if (c->modrm_rm == 5)
839 modrm_ea += insn_fetch(s32, 4, c->eip);
842 modrm_ea += insn_fetch(s8, 1, c->eip);
845 modrm_ea += insn_fetch(s32, 4, c->eip);
849 op->addr.mem.ea = modrm_ea;
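/*
 * SIB example: modrm 0x44 (mod=01, rm=100) pulls in a SIB byte plus a
 * disp8; sib 0x58 decodes as scale=01 (x2), index=011 (RBX), base=000
 * (RAX), so the effective address is RAX + RBX*2 + disp8.
 */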
854 static int decode_abs(struct x86_emulate_ctxt *ctxt,
855 struct x86_emulate_ops *ops,
858 struct decode_cache *c = &ctxt->decode;
859 int rc = X86EMUL_CONTINUE;
862 switch (c->ad_bytes) {
864 op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
867 op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
870 op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
877 static void fetch_bit_operand(struct decode_cache *c)
881 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
882 mask = ~(c->dst.bytes * 8 - 1);
884 if (c->src.bytes == 2)
885 sv = (s16)c->src.val & (s16)mask;
886 else if (c->src.bytes == 4)
887 sv = (s32)c->src.val & (s32)mask;
889 c->dst.addr.mem.ea += (sv >> 3);
892 /* only subword offset */
893 c->src.val &= (c->dst.bytes << 3) - 1;
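/*
 * Worked example for a register bit offset: bt with a dword destination
 * and src.val == 100 gives sv == 96, so the effective address moves
 * forward 12 bytes and src.val is reduced to 4 -- bit 100 becomes bit 4
 * of the dword at ea+12.
 */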
896 static int read_emulated(struct x86_emulate_ctxt *ctxt,
897 struct x86_emulate_ops *ops,
898 unsigned long addr, void *dest, unsigned size)
901 struct read_cache *mc = &ctxt->decode.mem_read;
904 int n = min(size, 8u);
906 if (mc->pos < mc->end)
909 rc = ops->read_emulated(addr, mc->data + mc->end, n,
910 &ctxt->exception, ctxt->vcpu);
911 if (rc != X86EMUL_CONTINUE)
916 memcpy(dest, mc->data + mc->pos, n);
921 return X86EMUL_CONTINUE;
924 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
925 struct x86_emulate_ops *ops,
926 unsigned int size, unsigned short port,
929 struct read_cache *rc = &ctxt->decode.io_read;
931 if (rc->pos == rc->end) { /* refill pio read ahead */
932 struct decode_cache *c = &ctxt->decode;
933 unsigned int in_page, n;
934 unsigned int count = c->rep_prefix ?
935 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
936 in_page = (ctxt->eflags & EFLG_DF) ?
937 offset_in_page(c->regs[VCPU_REGS_RDI]) :
938 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
939 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
943 rc->pos = rc->end = 0;
944 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
949 memcpy(dest, rc->data + rc->pos, size);
954 static u32 desc_limit_scaled(struct desc_struct *desc)
956 u32 limit = get_desc_limit(desc);
958 return desc->g ? (limit << 12) | 0xfff : limit;
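/* e.g. a raw limit of 0xfffff with g=1 scales to 0xffffffff (4GiB - 1) */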
961 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
962 struct x86_emulate_ops *ops,
963 u16 selector, struct desc_ptr *dt)
965 if (selector & 1 << 2) {
966 struct desc_struct desc;
967 memset (dt, 0, sizeof *dt);
968 if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
972 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
973 dt->address = get_desc_base(&desc);
975 ops->get_gdt(dt, ctxt->vcpu);
978 /* allowed only for 8-byte segment descriptors */
979 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
980 struct x86_emulate_ops *ops,
981 u16 selector, struct desc_struct *desc)
984 u16 index = selector >> 3;
988 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
990 if (dt.size < index * 8 + 7)
991 return emulate_gp(ctxt, selector & 0xfffc);
992 addr = dt.address + index * 8;
993 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
999 /* allowed only for 8-byte segment descriptors */
1000 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1001 struct x86_emulate_ops *ops,
1002 u16 selector, struct desc_struct *desc)
1005 u16 index = selector >> 3;
1009 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1011 if (dt.size < index * 8 + 7)
1012 return emulate_gp(ctxt, selector & 0xfffc);
1014 addr = dt.address + index * 8;
1015 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
1021 /* Does not support long mode */
1022 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1023 struct x86_emulate_ops *ops,
1024 u16 selector, int seg)
1026 struct desc_struct seg_desc;
1028 unsigned err_vec = GP_VECTOR;
1030 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1033 memset(&seg_desc, 0, sizeof seg_desc);
1035 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1036 || ctxt->mode == X86EMUL_MODE_REAL) {
1037 /* set real mode segment descriptor */
1038 set_desc_base(&seg_desc, selector << 4);
1039 set_desc_limit(&seg_desc, 0xffff);
1046 /* NULL selector is not valid for TR, CS and SS */
1047 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1051 /* TR should be in GDT only */
1052 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1055 if (null_selector) /* for NULL selector skip all following checks */
1058 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
1059 if (ret != X86EMUL_CONTINUE)
1062 err_code = selector & 0xfffc;
1063 err_vec = GP_VECTOR;
1065 /* can't load a system descriptor into a segment selector */
1066 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1070 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1076 cpl = ops->cpl(ctxt->vcpu);
1081 * segment is not a writable data segment, or the segment
1082 * selector's RPL != CPL, or the descriptor's DPL != CPL
1084 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1088 if (!(seg_desc.type & 8))
1091 if (seg_desc.type & 4) {
1097 if (rpl > cpl || dpl != cpl)
1100 /* CS(RPL) <- CPL */
1101 selector = (selector & 0xfffc) | cpl;
1104 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1107 case VCPU_SREG_LDTR:
1108 if (seg_desc.s || seg_desc.type != 2)
1111 default: /* DS, ES, FS, or GS */
1113 * segment is not a data or readable code segment or
1114 * ((segment is a data or nonconforming code segment)
1115 * and (both RPL and CPL > DPL))
1117 if ((seg_desc.type & 0xa) == 0x8 ||
1118 (((seg_desc.type & 0xc) != 0xc) &&
1119 (rpl > dpl && cpl > dpl)))
1125 /* mark segment as accessed */
1127 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1128 if (ret != X86EMUL_CONTINUE)
1132 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1133 ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
1134 return X86EMUL_CONTINUE;
1136 emulate_exception(ctxt, err_vec, err_code, true);
1137 return X86EMUL_PROPAGATE_FAULT;
1140 static void write_register_operand(struct operand *op)
1142 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1143 switch (op->bytes) {
1145 *(u8 *)op->addr.reg = (u8)op->val;
1148 *(u16 *)op->addr.reg = (u16)op->val;
1151 *op->addr.reg = (u32)op->val;
1152 break; /* 64b: zero-extend */
1154 *op->addr.reg = op->val;
1159 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1160 struct x86_emulate_ops *ops)
1163 struct decode_cache *c = &ctxt->decode;
1165 switch (c->dst.type) {
1167 write_register_operand(&c->dst);
1171 rc = ops->cmpxchg_emulated(
1172 linear(ctxt, c->dst.addr.mem),
1179 rc = ops->write_emulated(
1180 linear(ctxt, c->dst.addr.mem),
1185 if (rc != X86EMUL_CONTINUE)
1189 write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
1197 return X86EMUL_CONTINUE;
1200 static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1201 struct x86_emulate_ops *ops)
1203 struct decode_cache *c = &ctxt->decode;
1205 c->dst.type = OP_MEM;
1206 c->dst.bytes = c->op_bytes;
1207 c->dst.val = c->src.val;
1208 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1209 c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1210 c->dst.addr.mem.seg = VCPU_SREG_SS;
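/*
 * Note that emulate_push() only stages the operation: it computes the new
 * SS:RSP and records the value in c->dst, and the actual guest memory
 * write happens later in writeback(). That is why multi-push sequences
 * such as emulate_pusha() call writeback() after each push.
 */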
1213 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1214 struct x86_emulate_ops *ops,
1215 void *dest, int len)
1217 struct decode_cache *c = &ctxt->decode;
1219 struct segmented_address addr;
1221 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1222 addr.seg = VCPU_SREG_SS;
1223 rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
1224 if (rc != X86EMUL_CONTINUE)
1227 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1231 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1232 struct x86_emulate_ops *ops,
1233 void *dest, int len)
1236 unsigned long val, change_mask;
1237 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1238 int cpl = ops->cpl(ctxt->vcpu);
1240 rc = emulate_pop(ctxt, ops, &val, len);
1241 if (rc != X86EMUL_CONTINUE)
1244 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1245 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1247 switch(ctxt->mode) {
1248 case X86EMUL_MODE_PROT64:
1249 case X86EMUL_MODE_PROT32:
1250 case X86EMUL_MODE_PROT16:
1252 change_mask |= EFLG_IOPL;
1254 change_mask |= EFLG_IF;
1256 case X86EMUL_MODE_VM86:
1258 return emulate_gp(ctxt, 0);
1259 change_mask |= EFLG_IF;
1261 default: /* real mode */
1262 change_mask |= (EFLG_IOPL | EFLG_IF);
1266 *(unsigned long *)dest =
1267 (ctxt->eflags & ~change_mask) | (val & change_mask);
1272 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1273 struct x86_emulate_ops *ops, int seg)
1275 struct decode_cache *c = &ctxt->decode;
1277 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1279 emulate_push(ctxt, ops);
1282 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1283 struct x86_emulate_ops *ops, int seg)
1285 struct decode_cache *c = &ctxt->decode;
1286 unsigned long selector;
1289 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1290 if (rc != X86EMUL_CONTINUE)
1293 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1297 static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
1298 struct x86_emulate_ops *ops)
1300 struct decode_cache *c = &ctxt->decode;
1301 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1302 int rc = X86EMUL_CONTINUE;
1303 int reg = VCPU_REGS_RAX;
1305 while (reg <= VCPU_REGS_RDI) {
1306 (reg == VCPU_REGS_RSP) ?
1307 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1309 emulate_push(ctxt, ops);
1311 rc = writeback(ctxt, ops);
1312 if (rc != X86EMUL_CONTINUE)
1318 /* Disable writeback. */
1319 c->dst.type = OP_NONE;
1324 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1325 struct x86_emulate_ops *ops)
1327 struct decode_cache *c = &ctxt->decode;
1328 int rc = X86EMUL_CONTINUE;
1329 int reg = VCPU_REGS_RDI;
1331 while (reg >= VCPU_REGS_RAX) {
1332 if (reg == VCPU_REGS_RSP) {
1333 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1338 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1339 if (rc != X86EMUL_CONTINUE)
1346 int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1347 struct x86_emulate_ops *ops, int irq)
1349 struct decode_cache *c = &ctxt->decode;
1356 /* TODO: Add limit checks */
1357 c->src.val = ctxt->eflags;
1358 emulate_push(ctxt, ops);
1359 rc = writeback(ctxt, ops);
1360 if (rc != X86EMUL_CONTINUE)
1363 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1365 c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1366 emulate_push(ctxt, ops);
1367 rc = writeback(ctxt, ops);
1368 if (rc != X86EMUL_CONTINUE)
1371 c->src.val = c->eip;
1372 emulate_push(ctxt, ops);
1373 rc = writeback(ctxt, ops);
1374 if (rc != X86EMUL_CONTINUE)
1377 c->dst.type = OP_NONE;
1379 ops->get_idt(&dt, ctxt->vcpu);
1381 eip_addr = dt.address + (irq << 2);
1382 cs_addr = dt.address + (irq << 2) + 2;
1384 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
1385 if (rc != X86EMUL_CONTINUE)
1388 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
1389 if (rc != X86EMUL_CONTINUE)
1392 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1393 if (rc != X86EMUL_CONTINUE)
1401 static int emulate_int(struct x86_emulate_ctxt *ctxt,
1402 struct x86_emulate_ops *ops, int irq)
1404 switch(ctxt->mode) {
1405 case X86EMUL_MODE_REAL:
1406 return emulate_int_real(ctxt, ops, irq);
1407 case X86EMUL_MODE_VM86:
1408 case X86EMUL_MODE_PROT16:
1409 case X86EMUL_MODE_PROT32:
1410 case X86EMUL_MODE_PROT64:
1412 /* Protected mode interrupts are not yet implemented */
1413 return X86EMUL_UNHANDLEABLE;
1417 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1418 struct x86_emulate_ops *ops)
1420 struct decode_cache *c = &ctxt->decode;
1421 int rc = X86EMUL_CONTINUE;
1422 unsigned long temp_eip = 0;
1423 unsigned long temp_eflags = 0;
1424 unsigned long cs = 0;
1425 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1426 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1427 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1428 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1430 /* TODO: Add stack limit check */
1432 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1434 if (rc != X86EMUL_CONTINUE)
1437 if (temp_eip & ~0xffff)
1438 return emulate_gp(ctxt, 0);
1440 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1442 if (rc != X86EMUL_CONTINUE)
1445 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1447 if (rc != X86EMUL_CONTINUE)
1450 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1452 if (rc != X86EMUL_CONTINUE)
1458 if (c->op_bytes == 4)
1459 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1460 else if (c->op_bytes == 2) {
1461 ctxt->eflags &= ~0xffff;
1462 ctxt->eflags |= temp_eflags;
1465 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1466 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1471 static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1472 struct x86_emulate_ops* ops)
1474 switch(ctxt->mode) {
1475 case X86EMUL_MODE_REAL:
1476 return emulate_iret_real(ctxt, ops);
1477 case X86EMUL_MODE_VM86:
1478 case X86EMUL_MODE_PROT16:
1479 case X86EMUL_MODE_PROT32:
1480 case X86EMUL_MODE_PROT64:
1482 /* iret from protected mode is not yet implemented */
1483 return X86EMUL_UNHANDLEABLE;
1487 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1488 struct x86_emulate_ops *ops)
1490 struct decode_cache *c = &ctxt->decode;
1492 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1495 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1497 struct decode_cache *c = &ctxt->decode;
1498 switch (c->modrm_reg) {
1500 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1503 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1506 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1509 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1511 case 4: /* sal/shl */
1512 case 6: /* sal/shl */
1513 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1516 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1519 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1524 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1525 struct x86_emulate_ops *ops)
1527 struct decode_cache *c = &ctxt->decode;
1528 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1529 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1532 switch (c->modrm_reg) {
1533 case 0 ... 1: /* test */
1534 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1537 c->dst.val = ~c->dst.val;
1540 emulate_1op("neg", c->dst, ctxt->eflags);
1543 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1546 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1549 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1553 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1557 return X86EMUL_UNHANDLEABLE;
1560 return emulate_de(ctxt);
1561 return X86EMUL_CONTINUE;
1564 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1565 struct x86_emulate_ops *ops)
1567 struct decode_cache *c = &ctxt->decode;
1569 switch (c->modrm_reg) {
1571 emulate_1op("inc", c->dst, ctxt->eflags);
1574 emulate_1op("dec", c->dst, ctxt->eflags);
1576 case 2: /* call near abs */ {
1579 c->eip = c->src.val;
1580 c->src.val = old_eip;
1581 emulate_push(ctxt, ops);
1584 case 4: /* jmp abs */
1585 c->eip = c->src.val;
1588 emulate_push(ctxt, ops);
1591 return X86EMUL_CONTINUE;
1594 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1595 struct x86_emulate_ops *ops)
1597 struct decode_cache *c = &ctxt->decode;
1598 u64 old = c->dst.orig_val64;
1600 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1601 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1602 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1603 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1604 ctxt->eflags &= ~EFLG_ZF;
1606 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1607 (u32) c->regs[VCPU_REGS_RBX];
1609 ctxt->eflags |= EFLG_ZF;
1611 return X86EMUL_CONTINUE;
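/*
 * This is cmpxchg8b: if EDX:EAX does not match the 64-bit destination,
 * the old value is loaded into EDX:EAX and ZF is cleared; otherwise
 * ECX:EBX is stored (via the normal writeback path) and ZF is set.
 */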
1614 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1615 struct x86_emulate_ops *ops)
1617 struct decode_cache *c = &ctxt->decode;
1621 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1622 if (rc != X86EMUL_CONTINUE)
1624 if (c->op_bytes == 4)
1625 c->eip = (u32)c->eip;
1626 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1627 if (rc != X86EMUL_CONTINUE)
1629 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1633 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1634 struct x86_emulate_ops *ops, int seg)
1636 struct decode_cache *c = &ctxt->decode;
1640 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1642 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1643 if (rc != X86EMUL_CONTINUE)
1646 c->dst.val = c->src.val;
1651 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1652 struct x86_emulate_ops *ops, struct desc_struct *cs,
1653 struct desc_struct *ss)
1655 memset(cs, 0, sizeof(struct desc_struct));
1656 ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
1657 memset(ss, 0, sizeof(struct desc_struct));
1659 cs->l = 0; /* will be adjusted later */
1660 set_desc_base(cs, 0); /* flat segment */
1661 cs->g = 1; /* 4kb granularity */
1662 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1663 cs->type = 0x0b; /* Read, Execute, Accessed */
1665 cs->dpl = 0; /* will be adjusted later */
1669 set_desc_base(ss, 0); /* flat segment */
1670 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1671 ss->g = 1; /* 4kb granularity */
1673 ss->type = 0x03; /* Read/Write, Accessed */
1674 ss->d = 1; /* 32bit stack segment */
1680 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1682 struct decode_cache *c = &ctxt->decode;
1683 struct desc_struct cs, ss;
1687 /* syscall is not available in real mode */
1688 if (ctxt->mode == X86EMUL_MODE_REAL ||
1689 ctxt->mode == X86EMUL_MODE_VM86)
1690 return emulate_ud(ctxt);
1692 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1694 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1696 cs_sel = (u16)(msr_data & 0xfffc);
1697 ss_sel = (u16)(msr_data + 8);
1699 if (is_long_mode(ctxt->vcpu)) {
1703 ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
1704 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1705 ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
1706 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1708 c->regs[VCPU_REGS_RCX] = c->eip;
1709 if (is_long_mode(ctxt->vcpu)) {
1710 #ifdef CONFIG_X86_64
1711 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1713 ops->get_msr(ctxt->vcpu,
1714 ctxt->mode == X86EMUL_MODE_PROT64 ?
1715 MSR_LSTAR : MSR_CSTAR, &msr_data);
1718 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1719 ctxt->eflags &= ~(msr_data | EFLG_RF);
1723 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1724 c->eip = (u32)msr_data;
1726 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1729 return X86EMUL_CONTINUE;
1733 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1735 struct decode_cache *c = &ctxt->decode;
1736 struct desc_struct cs, ss;
1740 /* inject #GP if in real mode */
1741 if (ctxt->mode == X86EMUL_MODE_REAL)
1742 return emulate_gp(ctxt, 0);
1744 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1745 * Therefore, we inject an #UD.
1747 if (ctxt->mode == X86EMUL_MODE_PROT64)
1748 return emulate_ud(ctxt);
1750 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1752 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1753 switch (ctxt->mode) {
1754 case X86EMUL_MODE_PROT32:
1755 if ((msr_data & 0xfffc) == 0x0)
1756 return emulate_gp(ctxt, 0);
1758 case X86EMUL_MODE_PROT64:
1759 if (msr_data == 0x0)
1760 return emulate_gp(ctxt, 0);
1764 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1765 cs_sel = (u16)msr_data;
1766 cs_sel &= ~SELECTOR_RPL_MASK;
1767 ss_sel = cs_sel + 8;
1768 ss_sel &= ~SELECTOR_RPL_MASK;
1769 if (ctxt->mode == X86EMUL_MODE_PROT64
1770 || is_long_mode(ctxt->vcpu)) {
1775 ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
1776 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1777 ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
1778 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1780 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1783 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1784 c->regs[VCPU_REGS_RSP] = msr_data;
1786 return X86EMUL_CONTINUE;
1790 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1792 struct decode_cache *c = &ctxt->decode;
1793 struct desc_struct cs, ss;
1798 /* inject #GP if in real mode or Virtual 8086 mode */
1799 if (ctxt->mode == X86EMUL_MODE_REAL ||
1800 ctxt->mode == X86EMUL_MODE_VM86)
1801 return emulate_gp(ctxt, 0);
1803 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1805 if ((c->rex_prefix & 0x8) != 0x0)
1806 usermode = X86EMUL_MODE_PROT64;
1808 usermode = X86EMUL_MODE_PROT32;
1812 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1814 case X86EMUL_MODE_PROT32:
1815 cs_sel = (u16)(msr_data + 16);
1816 if ((msr_data & 0xfffc) == 0x0)
1817 return emulate_gp(ctxt, 0);
1818 ss_sel = (u16)(msr_data + 24);
1820 case X86EMUL_MODE_PROT64:
1821 cs_sel = (u16)(msr_data + 32);
1822 if (msr_data == 0x0)
1823 return emulate_gp(ctxt, 0);
1824 ss_sel = cs_sel + 8;
1829 cs_sel |= SELECTOR_RPL_MASK;
1830 ss_sel |= SELECTOR_RPL_MASK;
1832 ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
1833 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1834 ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
1835 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1837 c->eip = c->regs[VCPU_REGS_RDX];
1838 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
1840 return X86EMUL_CONTINUE;
1843 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
1844 struct x86_emulate_ops *ops)
1847 if (ctxt->mode == X86EMUL_MODE_REAL)
1849 if (ctxt->mode == X86EMUL_MODE_VM86)
1851 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1852 return ops->cpl(ctxt->vcpu) > iopl;
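/*
 * Protected-mode I/O with insufficient IOPL falls back to the TSS I/O
 * permission bitmap, checked below: the TR limit must cover at least the
 * 104-byte TSS, the bitmap offset lives at byte 102, and two bytes are
 * read so that an access spanning a byte boundary is fully checked; any
 * set bit denies the access.
 */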
1855 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1856 struct x86_emulate_ops *ops,
1859 struct desc_struct tr_seg;
1862 u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
1863 unsigned mask = (1 << len) - 1;
1866 ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
1869 if (desc_limit_scaled(&tr_seg) < 103)
1871 base = get_desc_base(&tr_seg);
1872 #ifdef CONFIG_X86_64
1873 base |= ((u64)base3) << 32;
1875 r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
1876 if (r != X86EMUL_CONTINUE)
1878 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
1880 r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
1882 if (r != X86EMUL_CONTINUE)
1884 if ((perm >> bit_idx) & mask)
1889 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1890 struct x86_emulate_ops *ops,
1896 if (emulator_bad_iopl(ctxt, ops))
1897 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1900 ctxt->perm_ok = true;
1905 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
1906 struct x86_emulate_ops *ops,
1907 struct tss_segment_16 *tss)
1909 struct decode_cache *c = &ctxt->decode;
1912 tss->flag = ctxt->eflags;
1913 tss->ax = c->regs[VCPU_REGS_RAX];
1914 tss->cx = c->regs[VCPU_REGS_RCX];
1915 tss->dx = c->regs[VCPU_REGS_RDX];
1916 tss->bx = c->regs[VCPU_REGS_RBX];
1917 tss->sp = c->regs[VCPU_REGS_RSP];
1918 tss->bp = c->regs[VCPU_REGS_RBP];
1919 tss->si = c->regs[VCPU_REGS_RSI];
1920 tss->di = c->regs[VCPU_REGS_RDI];
1922 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1923 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1924 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1925 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1926 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
1929 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
1930 struct x86_emulate_ops *ops,
1931 struct tss_segment_16 *tss)
1933 struct decode_cache *c = &ctxt->decode;
1937 ctxt->eflags = tss->flag | 2;
1938 c->regs[VCPU_REGS_RAX] = tss->ax;
1939 c->regs[VCPU_REGS_RCX] = tss->cx;
1940 c->regs[VCPU_REGS_RDX] = tss->dx;
1941 c->regs[VCPU_REGS_RBX] = tss->bx;
1942 c->regs[VCPU_REGS_RSP] = tss->sp;
1943 c->regs[VCPU_REGS_RBP] = tss->bp;
1944 c->regs[VCPU_REGS_RSI] = tss->si;
1945 c->regs[VCPU_REGS_RDI] = tss->di;
1948 * SDM says that segment selectors are loaded before segment descriptors.
1951 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
1952 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
1953 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
1954 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
1955 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
1958 * Now load segment descriptors. If a fault happens at this stage,
1959 * it is handled in the context of the new task.
1961 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
1962 if (ret != X86EMUL_CONTINUE)
1964 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
1965 if (ret != X86EMUL_CONTINUE)
1967 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
1968 if (ret != X86EMUL_CONTINUE)
1970 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
1971 if (ret != X86EMUL_CONTINUE)
1973 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
1974 if (ret != X86EMUL_CONTINUE)
1977 return X86EMUL_CONTINUE;
1980 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
1981 struct x86_emulate_ops *ops,
1982 u16 tss_selector, u16 old_tss_sel,
1983 ulong old_tss_base, struct desc_struct *new_desc)
1985 struct tss_segment_16 tss_seg;
1987 u32 new_tss_base = get_desc_base(new_desc);
1989 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1991 if (ret != X86EMUL_CONTINUE)
1992 /* FIXME: need to provide precise fault address */
1995 save_state_to_tss16(ctxt, ops, &tss_seg);
1997 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1999 if (ret != X86EMUL_CONTINUE)
2000 /* FIXME: need to provide precise fault address */
2003 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2005 if (ret != X86EMUL_CONTINUE)
2006 /* FIXME: need to provide precise fault address */
2009 if (old_tss_sel != 0xffff) {
2010 tss_seg.prev_task_link = old_tss_sel;
2012 ret = ops->write_std(new_tss_base,
2013 &tss_seg.prev_task_link,
2014 sizeof tss_seg.prev_task_link,
2015 ctxt->vcpu, &ctxt->exception);
2016 if (ret != X86EMUL_CONTINUE)
2017 /* FIXME: need to provide precise fault address */
2021 return load_state_from_tss16(ctxt, ops, &tss_seg);
2024 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2025 struct x86_emulate_ops *ops,
2026 struct tss_segment_32 *tss)
2028 struct decode_cache *c = &ctxt->decode;
2030 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
2032 tss->eflags = ctxt->eflags;
2033 tss->eax = c->regs[VCPU_REGS_RAX];
2034 tss->ecx = c->regs[VCPU_REGS_RCX];
2035 tss->edx = c->regs[VCPU_REGS_RDX];
2036 tss->ebx = c->regs[VCPU_REGS_RBX];
2037 tss->esp = c->regs[VCPU_REGS_RSP];
2038 tss->ebp = c->regs[VCPU_REGS_RBP];
2039 tss->esi = c->regs[VCPU_REGS_RSI];
2040 tss->edi = c->regs[VCPU_REGS_RDI];
2042 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2043 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2044 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2045 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2046 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
2047 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
2048 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2051 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2052 struct x86_emulate_ops *ops,
2053 struct tss_segment_32 *tss)
2055 struct decode_cache *c = &ctxt->decode;
2058 if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
2059 return emulate_gp(ctxt, 0);
2061 ctxt->eflags = tss->eflags | 2;
2062 c->regs[VCPU_REGS_RAX] = tss->eax;
2063 c->regs[VCPU_REGS_RCX] = tss->ecx;
2064 c->regs[VCPU_REGS_RDX] = tss->edx;
2065 c->regs[VCPU_REGS_RBX] = tss->ebx;
2066 c->regs[VCPU_REGS_RSP] = tss->esp;
2067 c->regs[VCPU_REGS_RBP] = tss->ebp;
2068 c->regs[VCPU_REGS_RSI] = tss->esi;
2069 c->regs[VCPU_REGS_RDI] = tss->edi;
2072 * SDM says that segment selectors are loaded before segment descriptors.
2075 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2076 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2077 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2078 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2079 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2080 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2081 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2084 * Now load segment descriptors. If a fault happens at this stage,
2085 * it is handled in the context of the new task.
2087 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2088 if (ret != X86EMUL_CONTINUE)
2090 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2091 if (ret != X86EMUL_CONTINUE)
2093 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2094 if (ret != X86EMUL_CONTINUE)
2096 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2097 if (ret != X86EMUL_CONTINUE)
2099 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2100 if (ret != X86EMUL_CONTINUE)
2102 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2103 if (ret != X86EMUL_CONTINUE)
2105 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2106 if (ret != X86EMUL_CONTINUE)
2109 return X86EMUL_CONTINUE;
2112 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2113 struct x86_emulate_ops *ops,
2114 u16 tss_selector, u16 old_tss_sel,
2115 ulong old_tss_base, struct desc_struct *new_desc)
2117 struct tss_segment_32 tss_seg;
2119 u32 new_tss_base = get_desc_base(new_desc);
2121 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2123 if (ret != X86EMUL_CONTINUE)
2124 /* FIXME: need to provide precise fault address */
2127 save_state_to_tss32(ctxt, ops, &tss_seg);
2129 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2131 if (ret != X86EMUL_CONTINUE)
2132 /* FIXME: need to provide precise fault address */
2135 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2137 if (ret != X86EMUL_CONTINUE)
2138 /* FIXME: need to provide precise fault address */
2141 if (old_tss_sel != 0xffff) {
2142 tss_seg.prev_task_link = old_tss_sel;
2144 ret = ops->write_std(new_tss_base,
2145 &tss_seg.prev_task_link,
2146 sizeof tss_seg.prev_task_link,
2147 ctxt->vcpu, &ctxt->exception);
2148 if (ret != X86EMUL_CONTINUE)
2149 /* FIXME: need to provide precise fault address */
2153 return load_state_from_tss32(ctxt, ops, &tss_seg);
2156 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2157 struct x86_emulate_ops *ops,
2158 u16 tss_selector, int reason,
2159 bool has_error_code, u32 error_code)
2161 struct desc_struct curr_tss_desc, next_tss_desc;
2163 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2164 ulong old_tss_base =
2165 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2168 /* FIXME: old_tss_base == ~0 ? */
2170 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2171 if (ret != X86EMUL_CONTINUE)
2173 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2174 if (ret != X86EMUL_CONTINUE)
2177 /* FIXME: check that next_tss_desc is tss */
2179 if (reason != TASK_SWITCH_IRET) {
2180 if ((tss_selector & 3) > next_tss_desc.dpl ||
2181 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
2182 return emulate_gp(ctxt, 0);
2185 desc_limit = desc_limit_scaled(&next_tss_desc);
2186 if (!next_tss_desc.p ||
2187 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2188 desc_limit < 0x2b)) {
2189 emulate_ts(ctxt, tss_selector & 0xfffc);
2190 return X86EMUL_PROPAGATE_FAULT;
2193 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2194 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2195 write_segment_descriptor(ctxt, ops, old_tss_sel,
2199 if (reason == TASK_SWITCH_IRET)
2200 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2202 /* set the back link to the previous task only if the NT bit is set in
2203 eflags; note that old_tss_sel is not used after this point */
2204 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2205 old_tss_sel = 0xffff;
2207 if (next_tss_desc.type & 8)
2208 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2209 old_tss_base, &next_tss_desc);
2211 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2212 old_tss_base, &next_tss_desc);
2213 if (ret != X86EMUL_CONTINUE)
2216 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2217 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2219 if (reason != TASK_SWITCH_IRET) {
2220 next_tss_desc.type |= (1 << 1); /* set busy flag */
2221 write_segment_descriptor(ctxt, ops, tss_selector,
2225 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2226 ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
2227 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2229 if (has_error_code) {
2230 struct decode_cache *c = &ctxt->decode;
2232 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2234 c->src.val = (unsigned long) error_code;
2235 emulate_push(ctxt, ops);
2241 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2242 u16 tss_selector, int reason,
2243 bool has_error_code, u32 error_code)
2245 struct x86_emulate_ops *ops = ctxt->ops;
2246 struct decode_cache *c = &ctxt->decode;
2250 c->dst.type = OP_NONE;
2252 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2253 has_error_code, error_code);
2255 if (rc == X86EMUL_CONTINUE) {
2256 rc = writeback(ctxt, ops);
2257 if (rc == X86EMUL_CONTINUE)
2261 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2264 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2265 int reg, struct operand *op)
2267 struct decode_cache *c = &ctxt->decode;
2268 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2270 register_address_increment(c, &c->regs[reg], df * op->bytes);
2271 op->addr.mem.ea = register_address(c, c->regs[reg]);
2272 op->addr.mem.seg = seg;
2275 static int em_push(struct x86_emulate_ctxt *ctxt)
2277 emulate_push(ctxt, ctxt->ops);
2278 return X86EMUL_CONTINUE;
2281 static int em_das(struct x86_emulate_ctxt *ctxt)
2283 struct decode_cache *c = &ctxt->decode;
2285 bool af, cf, old_cf;
2287 cf = ctxt->eflags & X86_EFLAGS_CF;
2293 af = ctxt->eflags & X86_EFLAGS_AF;
2294 if ((al & 0x0f) > 9 || af) {
2296 cf = old_cf | (al >= 250);
2301 if (old_al > 0x99 || old_cf) {
2307 /* Set PF, ZF, SF */
2308 c->src.type = OP_IMM;
2311 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2312 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2314 ctxt->eflags |= X86_EFLAGS_CF;
2316 ctxt->eflags |= X86_EFLAGS_AF;
2317 return X86EMUL_CONTINUE;
2320 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2322 struct decode_cache *c = &ctxt->decode;
2327 old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2330 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2331 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2332 return X86EMUL_CONTINUE;
2335 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2337 c->src.val = old_cs;
2338 emulate_push(ctxt, ctxt->ops);
2339 rc = writeback(ctxt, ctxt->ops);
2340 if (rc != X86EMUL_CONTINUE)
2343 c->src.val = old_eip;
2344 emulate_push(ctxt, ctxt->ops);
2345 rc = writeback(ctxt, ctxt->ops);
2346 if (rc != X86EMUL_CONTINUE)
2349 c->dst.type = OP_NONE;
2351 return X86EMUL_CONTINUE;
2354 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2356 struct decode_cache *c = &ctxt->decode;
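/* ret imm16: pop the return address, then release imm16 bytes of stack. */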
2359 c->dst.type = OP_REG;
2360 c->dst.addr.reg = &c->eip;
2361 c->dst.bytes = c->op_bytes;
2362 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2363 if (rc != X86EMUL_CONTINUE)
2365 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2366 return X86EMUL_CONTINUE;
2369 static int em_imul(struct x86_emulate_ctxt *ctxt)
2371 struct decode_cache *c = &ctxt->decode;
2373 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2374 return X86EMUL_CONTINUE;
2377 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2379 struct decode_cache *c = &ctxt->decode;
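/* Three-operand imul: seed dst with the immediate (src2), then multiply by src. */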
2381 c->dst.val = c->src2.val;
2382 return em_imul(ctxt);
2385 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2387 struct decode_cache *c = &ctxt->decode;
2389 c->dst.type = OP_REG;
2390 c->dst.bytes = c->src.bytes;
2391 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
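/*
* Replicate the sign bit of the source into every bit of DX/EDX/RDX:
* the shift isolates the sign bit, and ~(sign - 1) is all ones when
* the sign bit is set and zero otherwise.
*/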
2392 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2394 return X86EMUL_CONTINUE;
2397 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2399 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2400 struct decode_cache *c = &ctxt->decode;
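/* With CR4.TSD set, rdtsc is privileged: fault unless CPL == 0. */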
2403 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
2404 return emulate_gp(ctxt, 0);
2405 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2406 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2407 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2408 return X86EMUL_CONTINUE;
2411 static int em_mov(struct x86_emulate_ctxt *ctxt)
2413 struct decode_cache *c = &ctxt->decode;
2414 c->dst.val = c->src.val;
2415 return X86EMUL_CONTINUE;
2418 #define D(_y) { .flags = (_y) }
2420 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2421 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2422 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2424 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2425 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2427 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2428 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2429 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
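/*
* Illustrative expansion (not part of the tables): D6ALU(Lock)
* yields the six classic ALU encodings, e.g. for add (0x00 - 0x05):
*
* D(ByteOp | DstMem | SrcReg | ModRM | Lock) op r/m8, r8
* D(DstMem | SrcReg | ModRM | Lock) op r/m, r
* D(ByteOp | DstReg | SrcMem | ModRM) op r8, r/m8
* D(DstReg | SrcMem | ModRM) op r, r/m
* D(ByteOp | DstAcc | SrcImm) op al, imm8
* D(DstAcc | SrcImm) op ax/eax, imm
*/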
2432 static struct opcode group1[] = {
2436 static struct opcode group1A[] = {
2437 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2440 static struct opcode group3[] = {
2441 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2442 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2443 X4(D(SrcMem | ModRM)),
2446 static struct opcode group4[] = {
2447 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2451 static struct opcode group5[] = {
2452 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2453 D(SrcMem | ModRM | Stack),
2454 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2455 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2456 D(SrcMem | ModRM | Stack), N,
2459 static struct group_dual group7 = { {
2460 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2461 D(SrcNone | ModRM | DstMem | Mov), N,
2462 D(SrcMem16 | ModRM | Mov | Priv),
2463 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2465 D(SrcNone | ModRM | Priv | VendorSpecific), N,
2466 N, D(SrcNone | ModRM | Priv | VendorSpecific),
2467 D(SrcNone | ModRM | DstMem | Mov), N,
2468 D(SrcMem16 | ModRM | Mov | Priv), N,
2471 static struct opcode group8[] = {
2473 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2474 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2477 static struct group_dual group9 = { {
2478 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2480 N, N, N, N, N, N, N, N,
2483 static struct opcode group11[] = {
2484 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2487 static struct opcode opcode_table[256] = {
2490 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2493 D(ImplicitOps | Stack | No64), N,
2496 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2499 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2503 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2511 X8(I(SrcReg | Stack, em_push)),
2513 X8(D(DstReg | Stack)),
2515 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86/64) */
2519 I(SrcImm | Mov | Stack, em_push),
2520 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2521 I(SrcImmByte | Mov | Stack, em_push),
2522 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2523 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2524 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2528 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2529 G(DstMem | SrcImm | ModRM | Group, group1),
2530 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2531 G(DstMem | SrcImmByte | ModRM | Group, group1),
2532 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2534 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2535 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2536 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2537 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2539 X8(D(SrcAcc | DstReg)),
2541 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2542 I(SrcImmFAddr | No64, em_call_far), N,
2543 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2545 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2546 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2547 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2548 D2bv(SrcSI | DstDI | String),
2550 D2bv(DstAcc | SrcImm),
2551 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2552 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2553 D2bv(SrcAcc | DstDI | String),
2555 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2557 X8(I(DstReg | SrcImm | Mov, em_mov)),
2559 D2bv(DstMem | SrcImmByte | ModRM),
2560 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2561 D(ImplicitOps | Stack),
2562 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2563 G(ByteOp, group11), G(0, group11),
2565 N, N, N, D(ImplicitOps | Stack),
2566 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2568 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2571 N, N, N, N, N, N, N, N,
2574 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2576 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2577 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2578 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2581 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2583 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2584 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2587 static struct opcode twobyte_table[256] = {
2589 N, GD(0, &group7), N, N,
2590 N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
2591 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2592 N, D(ImplicitOps | ModRM), N, N,
2594 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2596 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2597 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2599 N, N, N, N, N, N, N, N,
2601 D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
2602 D(ImplicitOps | Priv), N,
2603 D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
2605 N, N, N, N, N, N, N, N,
2607 X16(D(DstReg | SrcMem | ModRM | Mov)),
2609 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2611 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2613 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
2619 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2620 N, D(DstMem | SrcReg | ModRM | BitOp),
2621 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2622 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2624 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2625 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2626 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2627 D(DstMem | SrcReg | Src2CL | ModRM),
2628 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2630 D2bv(DstMem | SrcReg | ModRM | Lock),
2631 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2632 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2633 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2636 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2637 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2638 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2640 D2bv(DstMem | SrcReg | ModRM | Lock),
2641 N, D(DstMem | SrcReg | ModRM | Mov),
2642 N, N, N, GD(0, &group9),
2643 N, N, N, N, N, N, N, N,
2645 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2647 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2649 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2662 static unsigned imm_size(struct decode_cache *c)
2666 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2672 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2673 unsigned size, bool sign_extension)
2675 struct decode_cache *c = &ctxt->decode;
2676 struct x86_emulate_ops *ops = ctxt->ops;
2677 int rc = X86EMUL_CONTINUE;
2681 op->addr.mem.ea = c->eip;
2682 /* NB. Immediates are sign-extended as necessary. */
2683 switch (op->bytes) {
2685 op->val = insn_fetch(s8, 1, c->eip);
2688 op->val = insn_fetch(s16, 2, c->eip);
2691 op->val = insn_fetch(s32, 4, c->eip);
2694 if (!sign_extension) {
2695 switch (op->bytes) {
2703 op->val &= 0xffffffff;
2712 x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
2714 struct x86_emulate_ops *ops = ctxt->ops;
2715 struct decode_cache *c = &ctxt->decode;
2716 int rc = X86EMUL_CONTINUE;
2717 int mode = ctxt->mode;
2718 int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
2719 bool op_prefix = false;
2720 struct opcode opcode, *g_mod012, *g_mod3;
2721 struct operand memop = { .type = OP_NONE };
2724 c->fetch.start = c->eip;
2725 c->fetch.end = c->fetch.start + insn_len;
2727 memcpy(c->fetch.data, insn, insn_len);
2728 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2731 case X86EMUL_MODE_REAL:
2732 case X86EMUL_MODE_VM86:
2733 case X86EMUL_MODE_PROT16:
2734 def_op_bytes = def_ad_bytes = 2;
2736 case X86EMUL_MODE_PROT32:
2737 def_op_bytes = def_ad_bytes = 4;
2739 #ifdef CONFIG_X86_64
2740 case X86EMUL_MODE_PROT64:
2749 c->op_bytes = def_op_bytes;
2750 c->ad_bytes = def_ad_bytes;
2752 /* Legacy prefixes. */
2754 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2755 case 0x66: /* operand-size override */
2757 /* switch between 2/4 bytes */
2758 c->op_bytes = def_op_bytes ^ 6;
2760 case 0x67: /* address-size override */
2761 if (mode == X86EMUL_MODE_PROT64)
2762 /* switch between 4/8 bytes */
2763 c->ad_bytes = def_ad_bytes ^ 12;
2765 /* switch between 2/4 bytes */
2766 c->ad_bytes = def_ad_bytes ^ 6;
2768 case 0x26: /* ES override */
2769 case 0x2e: /* CS override */
2770 case 0x36: /* SS override */
2771 case 0x3e: /* DS override */
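/* For 0x26/0x2e/0x36/0x3e, bits 4:3 encode ES, CS, SS, DS in order. */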
2772 set_seg_override(c, (c->b >> 3) & 3);
2774 case 0x64: /* FS override */
2775 case 0x65: /* GS override */
2776 set_seg_override(c, c->b & 7);
2778 case 0x40 ... 0x4f: /* REX */
2779 if (mode != X86EMUL_MODE_PROT64)
2781 c->rex_prefix = c->b;
2783 case 0xf0: /* LOCK */
2786 case 0xf2: /* REPNE/REPNZ */
2787 case 0xf3: /* REP/REPE/REPZ */
2788 c->rep_prefix = c->b;
2794 /* Any legacy prefix after a REX prefix nullifies its effect. */
2802 if (c->rex_prefix & 8)
2803 c->op_bytes = 8; /* REX.W */
2805 /* Opcode byte(s). */
2806 opcode = opcode_table[c->b];
2807 /* Two-byte opcode? */
2810 c->b = insn_fetch(u8, 1, c->eip);
2811 opcode = twobyte_table[c->b];
2813 c->d = opcode.flags;
2816 dual = c->d & GroupDual;
2817 c->modrm = insn_fetch(u8, 1, c->eip);
2820 if (c->d & GroupDual) {
2821 g_mod012 = opcode.u.gdual->mod012;
2822 g_mod3 = opcode.u.gdual->mod3;
2824 g_mod012 = g_mod3 = opcode.u.group;
2826 c->d &= ~(Group | GroupDual);
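/* The ModRM 'reg' field (bits 5:3) selects the entry within the group. */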
2828 goffset = (c->modrm >> 3) & 7;
2830 if ((c->modrm >> 6) == 3)
2831 opcode = g_mod3[goffset];
2833 opcode = g_mod012[goffset];
2834 c->d |= opcode.flags;
2837 if (c->d & Prefix) {
2838 if (c->rep_prefix && op_prefix)
2839 return X86EMUL_UNHANDLEABLE;
2840 simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
2841 switch (simd_prefix) {
2842 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
2843 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
2844 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
2845 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
2847 c->d |= opcode.flags;
2850 c->execute = opcode.u.execute;
2853 if (c->d == 0 || (c->d & Undefined))
2856 if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
2859 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2862 if (c->d & Op3264) {
2863 if (mode == X86EMUL_MODE_PROT64)
2872 /* ModRM and SIB bytes. */
2874 rc = decode_modrm(ctxt, ops, &memop);
2875 if (!c->has_seg_override)
2876 set_seg_override(c, c->modrm_seg);
2877 } else if (c->d & MemAbs)
2878 rc = decode_abs(ctxt, ops, &memop);
2879 if (rc != X86EMUL_CONTINUE)
2882 if (!c->has_seg_override)
2883 set_seg_override(c, VCPU_SREG_DS);
2885 memop.addr.mem.seg = seg_override(ctxt, ops, c);
2887 if (memop.type == OP_MEM && c->ad_bytes != 8)
2888 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
2890 if (memop.type == OP_MEM && c->rip_relative)
2891 memop.addr.mem.ea += c->eip;
* Decode and fetch the source operand: register, memory
* or immediate.
*/
2897 switch (c->d & SrcMask) {
2901 decode_register_operand(ctxt, &c->src, c, 0);
2910 memop.bytes = (c->d & ByteOp) ? 1 :
2916 rc = decode_imm(ctxt, &c->src, 2, false);
2919 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
2922 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
2925 rc = decode_imm(ctxt, &c->src, 1, true);
2928 rc = decode_imm(ctxt, &c->src, 1, false);
2931 c->src.type = OP_REG;
2932 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2933 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2934 fetch_register_operand(&c->src);
2941 c->src.type = OP_MEM;
2942 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2943 c->src.addr.mem.ea =
2944 register_address(c, c->regs[VCPU_REGS_RSI]);
c->src.addr.mem.seg = seg_override(ctxt, ops, c);
2949 c->src.type = OP_IMM;
2950 c->src.addr.mem.ea = c->eip;
2951 c->src.bytes = c->op_bytes + 2;
2952 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2955 memop.bytes = c->op_bytes + 2;
2960 if (rc != X86EMUL_CONTINUE)
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
2967 switch (c->d & Src2Mask) {
2972 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
2975 rc = decode_imm(ctxt, &c->src2, 1, true);
2982 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
2986 if (rc != X86EMUL_CONTINUE)
2989 /* Decode and fetch the destination operand: register or memory. */
2990 switch (c->d & DstMask) {
2992 decode_register_operand(ctxt, &c->dst, c,
2993 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2996 c->dst.type = OP_IMM;
2997 c->dst.addr.mem.ea = c->eip;
2999 c->dst.val = insn_fetch(u8, 1, c->eip);
3004 if ((c->d & DstMask) == DstMem64)
3007 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3009 fetch_bit_operand(c);
3010 c->dst.orig_val = c->dst.val;
3013 c->dst.type = OP_REG;
3014 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3015 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
3016 fetch_register_operand(&c->dst);
3017 c->dst.orig_val = c->dst.val;
3020 c->dst.type = OP_MEM;
3021 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3022 c->dst.addr.mem.ea =
3023 register_address(c, c->regs[VCPU_REGS_RDI]);
3024 c->dst.addr.mem.seg = VCPU_SREG_ES;
3028 /* Special instructions do their own operand decoding. */
3030 c->dst.type = OP_NONE; /* Disable writeback. */
3035 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3038 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3040 struct decode_cache *c = &ctxt->decode;
/*
* The second termination condition applies only to the REPE and
* REPNE forms, i.e. to cmps (0xa6/0xa7) and scas (0xae/0xaf).
* If the repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
* corresponding termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
3049 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3050 (c->b == 0xae) || (c->b == 0xaf))
3051 && (((c->rep_prefix == REPE_PREFIX) &&
3052 ((ctxt->eflags & EFLG_ZF) == 0))
3053 || ((c->rep_prefix == REPNE_PREFIX) &&
3054 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3061 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3063 struct x86_emulate_ops *ops = ctxt->ops;
3065 struct decode_cache *c = &ctxt->decode;
3066 int rc = X86EMUL_CONTINUE;
3067 int saved_dst_type = c->dst.type;
3068 int irq; /* Used for int 3, int, and into */
3070 ctxt->decode.mem_read.pos = 0;
3072 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3073 rc = emulate_ud(ctxt);
3077 /* LOCK prefix is allowed only with some instructions */
3078 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3079 rc = emulate_ud(ctxt);
3083 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3084 rc = emulate_ud(ctxt);
3089 && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
3090 || !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
3091 rc = emulate_ud(ctxt);
3095 if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
3096 rc = emulate_nm(ctxt);
3100 /* Privileged instruction can be executed only in CPL=0 */
3101 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
3102 rc = emulate_gp(ctxt, 0);
3106 if (c->rep_prefix && (c->d & String)) {
3107 /* All REP prefixes have the same first termination condition */
3108 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3114 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3115 rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
3116 c->src.valptr, c->src.bytes);
3117 if (rc != X86EMUL_CONTINUE)
3119 c->src.orig_val64 = c->src.val64;
3122 if (c->src2.type == OP_MEM) {
3123 rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
3124 &c->src2.val, c->src2.bytes);
3125 if (rc != X86EMUL_CONTINUE)
3129 if ((c->d & DstMask) == ImplicitOps)
3133 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3134 /* optimisation - avoid slow emulated read if Mov */
3135 rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
3136 &c->dst.val, c->dst.bytes);
3137 if (rc != X86EMUL_CONTINUE)
3140 c->dst.orig_val = c->dst.val;
3145 rc = c->execute(ctxt);
3146 if (rc != X86EMUL_CONTINUE)
3157 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3159 case 0x06: /* push es */
3160 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3162 case 0x07: /* pop es */
3163 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3167 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3169 case 0x0e: /* push cs */
3170 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3174 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3176 case 0x16: /* push ss */
3177 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3179 case 0x17: /* pop ss */
3180 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3184 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3186 case 0x1e: /* push ds */
3187 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3189 case 0x1f: /* pop ds */
3190 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3194 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3198 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3202 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3206 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3208 case 0x40 ... 0x47: /* inc r16/r32 */
3209 emulate_1op("inc", c->dst, ctxt->eflags);
3211 case 0x48 ... 0x4f: /* dec r16/r32 */
3212 emulate_1op("dec", c->dst, ctxt->eflags);
3214 case 0x58 ... 0x5f: /* pop reg */
3216 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3218 case 0x60: /* pusha */
3219 rc = emulate_pusha(ctxt, ops);
3221 case 0x61: /* popa */
3222 rc = emulate_popa(ctxt, ops);
3224 case 0x63: /* movsxd */
3225 if (ctxt->mode != X86EMUL_MODE_PROT64)
3226 goto cannot_emulate;
3227 c->dst.val = (s32) c->src.val;
3229 case 0x6c: /* insb */
3230 case 0x6d: /* insw/insd */
3231 c->src.val = c->regs[VCPU_REGS_RDX];
3233 case 0x6e: /* outsb */
3234 case 0x6f: /* outsw/outsd */
3235 c->dst.val = c->regs[VCPU_REGS_RDX];
3238 case 0x70 ... 0x7f: /* jcc (short) */
3239 if (test_cc(c->b, ctxt->eflags))
3240 jmp_rel(c, c->src.val);
3242 case 0x80 ... 0x83: /* Grp1 */
3243 switch (c->modrm_reg) {
3264 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3266 case 0x86 ... 0x87: /* xchg */
3268 /* Write back the register source. */
3269 c->src.val = c->dst.val;
3270 write_register_operand(&c->src);
* Write back the memory destination with implicit LOCK
* prefix.
*/
3275 c->dst.val = c->src.orig_val;
3278 case 0x8c: /* mov r/m, sreg */
3279 if (c->modrm_reg > VCPU_SREG_GS) {
3280 rc = emulate_ud(ctxt);
3283 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3285 case 0x8d: /* lea r16/r32, m */
3286 c->dst.val = c->src.addr.mem.ea;
3288 case 0x8e: { /* mov seg, r/m16 */
3293 if (c->modrm_reg == VCPU_SREG_CS ||
3294 c->modrm_reg > VCPU_SREG_GS) {
3295 rc = emulate_ud(ctxt);
3299 if (c->modrm_reg == VCPU_SREG_SS)
3300 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3302 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3304 c->dst.type = OP_NONE; /* Disable writeback. */
3307 case 0x8f: /* pop (sole member of Grp1a) */
3308 rc = emulate_grp1a(ctxt, ops);
3310 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3311 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3314 case 0x98: /* cbw/cwde/cdqe */
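/* Sign-extend AL into AX, AX into EAX, or EAX into RAX. */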
3315 switch (c->op_bytes) {
3316 case 2: c->dst.val = (s8)c->dst.val; break;
3317 case 4: c->dst.val = (s16)c->dst.val; break;
3318 case 8: c->dst.val = (s32)c->dst.val; break;
3321 case 0x9c: /* pushf */
3322 c->src.val = (unsigned long) ctxt->eflags;
3323 emulate_push(ctxt, ops);
3325 case 0x9d: /* popf */
3326 c->dst.type = OP_REG;
3327 c->dst.addr.reg = &ctxt->eflags;
3328 c->dst.bytes = c->op_bytes;
3329 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3331 case 0xa6 ... 0xa7: /* cmps */
3332 c->dst.type = OP_NONE; /* Disable writeback. */
3334 case 0xa8 ... 0xa9: /* test ax, imm */
3336 case 0xae ... 0xaf: /* scas */
3341 case 0xc3: /* ret */
3342 c->dst.type = OP_REG;
3343 c->dst.addr.reg = &c->eip;
3344 c->dst.bytes = c->op_bytes;
3345 goto pop_instruction;
3346 case 0xc4: /* les */
3347 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3349 case 0xc5: /* lds */
3350 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3352 case 0xcb: /* ret far */
3353 rc = emulate_ret_far(ctxt, ops);
3355 case 0xcc: /* int3 */
3358 case 0xcd: /* int n */
3361 rc = emulate_int(ctxt, ops, irq);
3363 case 0xce: /* into */
3364 if (ctxt->eflags & EFLG_OF) {
3369 case 0xcf: /* iret */
3370 rc = emulate_iret(ctxt, ops);
3372 case 0xd0 ... 0xd1: /* Grp2 */
3375 case 0xd2 ... 0xd3: /* Grp2 */
3376 c->src.val = c->regs[VCPU_REGS_RCX];
3379 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
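/*
* loop family: decrement (e)cx and branch while it is non-zero;
* loopnz/loopz (0xe0/0xe1) additionally test ZF, with c->b ^ 0x5
* mapping each opcode to the matching condition code.
*/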
3380 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3381 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3382 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3383 jmp_rel(c, c->src.val);
3385 case 0xe3: /* jcxz/jecxz/jrcxz */
3386 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3387 jmp_rel(c, c->src.val);
3389 case 0xe4: /* inb */
3392 case 0xe6: /* outb */
3393 case 0xe7: /* out */
3395 case 0xe8: /* call (near) */ {
3396 long int rel = c->src.val;
3397 c->src.val = (unsigned long) c->eip;
3399 emulate_push(ctxt, ops);
3402 case 0xe9: /* jmp rel */
3404 case 0xea: { /* jmp far */
3407 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3409 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3413 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3417 jmp: /* jmp rel short */
3418 jmp_rel(c, c->src.val);
3419 c->dst.type = OP_NONE; /* Disable writeback. */
3421 case 0xec: /* in al,dx */
3422 case 0xed: /* in (e/r)ax,dx */
3423 c->src.val = c->regs[VCPU_REGS_RDX];
3425 c->dst.bytes = min(c->dst.bytes, 4u);
3426 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3427 rc = emulate_gp(ctxt, 0);
3430 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3432 goto done; /* IO is needed */
3434 case 0xee: /* out dx,al */
3435 case 0xef: /* out dx,(e/r)ax */
3436 c->dst.val = c->regs[VCPU_REGS_RDX];
3438 c->src.bytes = min(c->src.bytes, 4u);
3439 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3441 rc = emulate_gp(ctxt, 0);
3444 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3445 &c->src.val, 1, ctxt->vcpu);
3446 c->dst.type = OP_NONE; /* Disable writeback. */
3448 case 0xf4: /* hlt */
3449 ctxt->vcpu->arch.halt_request = 1;
3451 case 0xf5: /* cmc */
3452 /* complement carry flag from eflags reg */
3453 ctxt->eflags ^= EFLG_CF;
3455 case 0xf6 ... 0xf7: /* Grp3 */
3456 rc = emulate_grp3(ctxt, ops);
3458 case 0xf8: /* clc */
3459 ctxt->eflags &= ~EFLG_CF;
3461 case 0xf9: /* stc */
3462 ctxt->eflags |= EFLG_CF;
3464 case 0xfa: /* cli */
3465 if (emulator_bad_iopl(ctxt, ops)) {
3466 rc = emulate_gp(ctxt, 0);
3469 ctxt->eflags &= ~X86_EFLAGS_IF;
3471 case 0xfb: /* sti */
3472 if (emulator_bad_iopl(ctxt, ops)) {
3473 rc = emulate_gp(ctxt, 0);
3476 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3477 ctxt->eflags |= X86_EFLAGS_IF;
3480 case 0xfc: /* cld */
3481 ctxt->eflags &= ~EFLG_DF;
3483 case 0xfd: /* std */
3484 ctxt->eflags |= EFLG_DF;
3486 case 0xfe: /* Grp4 */
3488 rc = emulate_grp45(ctxt, ops);
3490 case 0xff: /* Grp5 */
3491 if (c->modrm_reg == 5)
3495 goto cannot_emulate;
3498 if (rc != X86EMUL_CONTINUE)
3502 rc = writeback(ctxt, ops);
3503 if (rc != X86EMUL_CONTINUE)
* Restore dst type in case the decoding will be reused
* (happens for string instructions).
*/
3510 c->dst.type = saved_dst_type;
3512 if ((c->d & SrcMask) == SrcSI)
3513 string_addr_inc(ctxt, seg_override(ctxt, ops, c),
3514 VCPU_REGS_RSI, &c->src);
3516 if ((c->d & DstMask) == DstDI)
3517 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3520 if (c->rep_prefix && (c->d & String)) {
3521 struct read_cache *r = &ctxt->decode.io_read;
3522 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3524 if (!string_insn_completed(ctxt)) {
* Re-enter the guest when the pio read-ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
3529 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3530 (r->end == 0 || r->end != r->pos)) {
* Reset the read cache. This usually happens before
* decode, but since the instruction is restarted
* we have to do it here.
*/
3536 ctxt->decode.mem_read.end = 0;
3537 return EMULATION_RESTART;
3539 goto done; /* skip rip writeback */
3546 if (rc == X86EMUL_PROPAGATE_FAULT)
3547 ctxt->have_exception = true;
3548 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3552 case 0x01: /* lgdt, lidt, lmsw */
3553 switch (c->modrm_reg) {
3555 unsigned long address;
3557 case 0: /* vmcall */
3558 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3559 goto cannot_emulate;
3561 rc = kvm_fix_hypercall(ctxt->vcpu);
3562 if (rc != X86EMUL_CONTINUE)
3565 /* Let the processor re-execute the fixed hypercall */
3567 /* Disable writeback. */
3568 c->dst.type = OP_NONE;
3571 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3572 &size, &address, c->op_bytes);
3573 if (rc != X86EMUL_CONTINUE)
3575 realmode_lgdt(ctxt->vcpu, size, address);
3576 /* Disable writeback. */
3577 c->dst.type = OP_NONE;
3579 case 3: /* lidt/vmmcall */
3580 if (c->modrm_mod == 3) {
3581 switch (c->modrm_rm) {
3583 rc = kvm_fix_hypercall(ctxt->vcpu);
3586 goto cannot_emulate;
3589 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3592 if (rc != X86EMUL_CONTINUE)
3594 realmode_lidt(ctxt->vcpu, size, address);
3596 /* Disable writeback. */
3597 c->dst.type = OP_NONE;
3601 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3604 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3605 (c->src.val & 0x0f), ctxt->vcpu);
3606 c->dst.type = OP_NONE;
3608 case 5: /* not defined */
3610 rc = X86EMUL_PROPAGATE_FAULT;
3613 emulate_invlpg(ctxt->vcpu,
3614 linear(ctxt, c->src.addr.mem));
3615 /* Disable writeback. */
3616 c->dst.type = OP_NONE;
3619 goto cannot_emulate;
3622 case 0x05: /* syscall */
3623 rc = emulate_syscall(ctxt, ops);
3626 emulate_clts(ctxt->vcpu);
3628 case 0x09: /* wbinvd */
3629 kvm_emulate_wbinvd(ctxt->vcpu);
3631 case 0x08: /* invd */
3632 case 0x0d: /* GrpP (prefetch) */
3633 case 0x18: /* Grp16 (prefetch/nop) */
3635 case 0x20: /* mov cr, reg */
3636 switch (c->modrm_reg) {
3641 rc = X86EMUL_PROPAGATE_FAULT;
3644 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3646 case 0x21: /* mov from dr to reg */
3647 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3648 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3650 rc = X86EMUL_PROPAGATE_FAULT;
3653 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3655 case 0x22: /* mov reg, cr */
3656 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3657 emulate_gp(ctxt, 0);
3658 rc = X86EMUL_PROPAGATE_FAULT;
3661 c->dst.type = OP_NONE;
3663 case 0x23: /* mov from reg to dr */
3664 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3665 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3667 rc = X86EMUL_PROPAGATE_FAULT;
3671 if (ops->set_dr(c->modrm_reg, c->src.val &
3672 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3673 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3674 /* #UD condition is already handled by the code above */
3675 emulate_gp(ctxt, 0);
3676 rc = X86EMUL_PROPAGATE_FAULT;
3680 c->dst.type = OP_NONE; /* no writeback */
3684 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3685 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3686 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3687 emulate_gp(ctxt, 0);
3688 rc = X86EMUL_PROPAGATE_FAULT;
3691 rc = X86EMUL_CONTINUE;
3695 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3696 emulate_gp(ctxt, 0);
3697 rc = X86EMUL_PROPAGATE_FAULT;
3700 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3701 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3703 rc = X86EMUL_CONTINUE;
3705 case 0x34: /* sysenter */
3706 rc = emulate_sysenter(ctxt, ops);
3708 case 0x35: /* sysexit */
3709 rc = emulate_sysexit(ctxt, ops);
3711 case 0x40 ... 0x4f: /* cmov */
3712 c->dst.val = c->dst.orig_val = c->src.val;
3713 if (!test_cc(c->b, ctxt->eflags))
3714 c->dst.type = OP_NONE; /* no writeback */
case 0x80 ... 0x8f: /* jcc (near) */
3717 if (test_cc(c->b, ctxt->eflags))
3718 jmp_rel(c, c->src.val);
3720 case 0x90 ... 0x9f: /* setcc r/m8 */
3721 c->dst.val = test_cc(c->b, ctxt->eflags);
3723 case 0xa0: /* push fs */
3724 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3726 case 0xa1: /* pop fs */
3727 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3731 c->dst.type = OP_NONE;
3732 /* only subword offset */
3733 c->src.val &= (c->dst.bytes << 3) - 1;
3734 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3736 case 0xa4: /* shld imm8, r, r/m */
3737 case 0xa5: /* shld cl, r, r/m */
3738 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3740 case 0xa8: /* push gs */
3741 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3743 case 0xa9: /* pop gs */
3744 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3748 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3750 case 0xac: /* shrd imm8, r, r/m */
3751 case 0xad: /* shrd cl, r, r/m */
3752 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3754 case 0xae: /* clflush */
3756 case 0xb0 ... 0xb1: /* cmpxchg */
* Save the real source value, then compare EAX against the
* destination.
*/
3761 c->src.orig_val = c->src.val;
3762 c->src.val = c->regs[VCPU_REGS_RAX];
3763 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3764 if (ctxt->eflags & EFLG_ZF) {
3765 /* Success: write back to memory. */
3766 c->dst.val = c->src.orig_val;
3768 /* Failure: write the value we saw to EAX. */
3769 c->dst.type = OP_REG;
3770 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3773 case 0xb2: /* lss */
3774 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
3778 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3780 case 0xb4: /* lfs */
3781 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
3783 case 0xb5: /* lgs */
3784 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
3786 case 0xb6 ... 0xb7: /* movzx */
3787 c->dst.bytes = c->op_bytes;
3788 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3791 case 0xba: /* Grp8 */
3792 switch (c->modrm_reg & 3) {
3805 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3807 case 0xbc: { /* bsf */
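/*
* bsf sets ZF when the source is zero; setz captures that so the
* emulated EFLAGS.ZF can be updated below (bsr is analogous).
*/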
3809 __asm__ ("bsf %2, %0; setz %1"
3810 : "=r"(c->dst.val), "=q"(zf)
3812 ctxt->eflags &= ~X86_EFLAGS_ZF;
3814 ctxt->eflags |= X86_EFLAGS_ZF;
3815 c->dst.type = OP_NONE; /* Disable writeback. */
3819 case 0xbd: { /* bsr */
3821 __asm__ ("bsr %2, %0; setz %1"
3822 : "=r"(c->dst.val), "=q"(zf)
3824 ctxt->eflags &= ~X86_EFLAGS_ZF;
3826 ctxt->eflags |= X86_EFLAGS_ZF;
3827 c->dst.type = OP_NONE; /* Disable writeback. */
3831 case 0xbe ... 0xbf: /* movsx */
3832 c->dst.bytes = c->op_bytes;
3833 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3836 case 0xc0 ... 0xc1: /* xadd */
3837 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3838 /* Write back the register source. */
3839 c->src.val = c->dst.orig_val;
3840 write_register_operand(&c->src);
3842 case 0xc3: /* movnti */
3843 c->dst.bytes = c->op_bytes;
3844 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3847 case 0xc7: /* Grp9 (cmpxchg8b) */
3848 rc = emulate_grp9(ctxt, ops);
3851 goto cannot_emulate;
3854 if (rc != X86EMUL_CONTINUE)