/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
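
/*
 * Editor's illustration (not in the original source): the decode flags
 * pack several fields into one u32. A hypothetical entry with flags
 * (ByteOp | DstMem | SrcReg) is picked apart as:
 *
 *	flags & ByteOp	-> 8-bit operand size
 *	flags & DstMask	-> DstMem (destination is memory)
 *	flags & SrcMask	-> SrcReg (source is a register)
 */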
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (1<<16)	/* Instruction varies with 66/f2/f3 prefix */
#define Sse         (1<<17)	/* SSE Vector instruction */
/* Misc flags */
#define VendorSpecific (1<<22)	/* Vendor specific instruction */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
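
/*
 * Editor's note (illustrative only): these repetition macros let a run of
 * identical table entries be written once. For example, X16(D(SrcImmByte))
 * expands to sixteen copies of D(SrcImmByte), suitable for a block such as
 * the sixteen 0x70..0x7f conditional jumps.
 */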
struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)		\
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1: __emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2: __emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4: __emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)	\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

/* Instruction has only one source operand, destination is implicit
 * (e.g. mul, div, imul, idiv). */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
	do {								\
		switch ((_src).bytes) {					\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch ((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "q", _ex));	\
			break;						\
		}							\
	} while (0)
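
/*
 * Editor's note on the _ex variant: if the host "div"/"idiv" faults
 * (divide error), the _ASM_EXTABLE entry redirects the fault to label 3:,
 * which sets _ex to 1 instead of killing the host. The caller (see
 * emulate_grp3() below) checks that flag and injects #DE into the guest.
 */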
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)(_x);							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
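
/*
 * Worked example (editor's note): ad_bytes == 2 gives (1UL << 16) - 1 =
 * 0xffff, i.e. 16-bit wraparound; ad_bytes == 4 gives 0xffffffff. For
 * ad_bytes == 8 the shift would be undefined on a 64-bit host, which is
 * why the callers below special-case that width first.
 */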
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
	return address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
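
/*
 * Example (editor's note): with 16-bit addressing and *reg == 0xffff, an
 * increment of +1 yields 0x0000 in the low 16 bits while the upper bits
 * of the register are preserved, matching real-mode wraparound.
 */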
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops,
			     struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return c->seg_override;
}
static ulong linear(struct x86_emulate_ctxt *ctxt,
		    struct segmented_address addr)
{
	struct decode_cache *c = &ctxt->decode;
	ulong la;

	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
	if (c->ad_bytes != 8)
		la &= (u32)-1;
	return la;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
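
/*
 * Editor's note: the fetch cache reads ahead at most 15 bytes (the
 * architectural instruction-length limit) and never crosses a page
 * boundary in a single ops->fetch() call, so a fetch fault can be
 * attributed to the correct page.
 */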
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
			   ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
			   ctxt->vcpu, &ctxt->exception);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: rc |= (flags & EFLG_OF); break;			/* o */
	case 1: rc |= (flags & EFLG_CF); break;			/* b/c/nae */
	case 2: rc |= (flags & EFLG_ZF); break;			/* z/e */
	case 3: rc |= (flags & (EFLG_CF|EFLG_ZF)); break;	/* be/na */
	case 4: rc |= (flags & EFLG_SF); break;			/* s */
	case 5: rc |= (flags & EFLG_PF); break;			/* p/pe */
	case 7: rc |= (flags & EFLG_ZF);			/* le/ng */
		/* fall through */
	case 6: rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;						/* l/nge */
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
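
/*
 * Example (editor's note): condition 0x4 tests ZF ("je"); 0x5 is the same
 * test with the low bit set, so the result is inverted ("jne"). Conditions
 * 6/7 compute SF != OF for the signed "l"/"le" family.
 */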
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1: op->val = *(u8 *)op->addr.reg;	break;
	case 2: op->val = *(u16 *)op->addr.reg; break;
	case 4: op->val = *(u32 *)op->addr.reg; break;
	case 8: op->val = *(u64 *)op->addr.reg; break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);

	if (c->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		if (c->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = c->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0: modrm_ea += bx + si; break;
		case 1: modrm_ea += bx + di; break;
		case 2: modrm_ea += bp + si; break;
		case 3: modrm_ea += bp + di; break;
		case 4: modrm_ea += si; break;
		case 5: modrm_ea += di; break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7: modrm_ea += bx; break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2: op->addr.mem.ea = insn_fetch(u16, 2, c->eip); break;
	case 4: op->addr.mem.ea = insn_fetch(u32, 4, c->eip); break;
	case 8: op->addr.mem.ea = insn_fetch(u64, 8, c->eip); break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;
		else
			sv = (s64)c->src.val & (s64)mask;

		c->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
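
/*
 * Worked example (editor's note): for a 16-bit "bt m16, r16" with a source
 * bit index of 19, mask is ~15 and sv = 16, so the memory operand moves
 * forward sv >> 3 = 2 bytes; the remaining in-word offset 19 & 15 = 3 is
 * left in c->src.val.
 */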
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
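
/*
 * Example (editor's note): with the granularity bit set, a raw limit of
 * 0xfffff scales to (0xfffff << 12) | 0xfff = 0xffffffff, i.e. a full
 * 4GB segment.
 */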
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
			    &ctxt->exception);

	return ret;
}

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
			     &ctxt->exception);

	return ret;
}
/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load a system descriptor into a segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the selector's
		 * RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment, or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.orig_val, &c->dst.val,
					c->dst.bytes, &ctxt->exception,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.val, c->dst.bytes,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	c->dst.addr.mem.seg = VCPU_SREG_SS;
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
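
/*
 * Editor's note: in protected mode an unprivileged POPF does not fault;
 * the IF/IOPL bits are simply dropped from change_mask. E.g. at CPL 3
 * with IOPL 0, popping a value with IF set leaves the interrupt flag
 * untouched.
 */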
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		c->src.val = (reg == VCPU_REGS_RSP) ?
			     old_esp : c->regs[reg];

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
						   c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected-mode interrupts are not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:		/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:		/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4:		/* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5:		/* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6:		/* div */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7:		/* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
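
/*
 * Editor's note: this is CMPXCHG8B. If EDX:EAX matches the old 64-bit
 * memory value, the destination is replaced by ECX:EBX and ZF is set;
 * otherwise EDX:EAX receives the current memory value and ZF is cleared.
 * The actual memory update happens later in writeback().
 */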
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
	int rc;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.val = c->src.val;
	return rc;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
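
/*
 * Worked example (editor's note): for "out dx, al" with port 0x3f9, the
 * code reads the 16-bit word at bitmap offset 0x3f9 / 8 = 0x7f and tests
 * bit_idx = 0x3f9 & 7 = 1 with mask 0x1 (len == 1). Reading two bytes at
 * once is what lets a permission check straddle a byte boundary.
 */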
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in EFLAGS; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
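
/*
 * Worked example (editor's note): AL = 0x9a with CF = AF = 0. The low
 * nibble 0xa > 9, so AL -= 6 -> 0x94 and AF is set; old AL 0x9a > 0x99,
 * so AL -= 0x60 -> 0x34 and CF is set, which is the correct packed-BCD
 * adjustment after a subtraction.
 */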
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	return X86EMUL_CONTINUE;
}
2354 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2356 struct decode_cache *c = &ctxt->decode;
2359 c->dst.type = OP_REG;
2360 c->dst.addr.reg = &c->eip;
2361 c->dst.bytes = c->op_bytes;
2362 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2363 if (rc != X86EMUL_CONTINUE)
2365 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2366 return X86EMUL_CONTINUE;
2369 static int em_imul(struct x86_emulate_ctxt *ctxt)
2371 struct decode_cache *c = &ctxt->decode;
2373 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2374 return X86EMUL_CONTINUE;
2377 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2379 struct decode_cache *c = &ctxt->decode;
2381 c->dst.val = c->src2.val;
2382 return em_imul(ctxt);
2385 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2387 struct decode_cache *c = &ctxt->decode;
2389 c->dst.type = OP_REG;
2390 c->dst.bytes = c->src.bytes;
2391 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2392 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2394 return X86EMUL_CONTINUE;
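/*
 * Illustrative sketch of the sign-replication trick em_cwd() uses,
 * not part of the emulator proper: if the top bit of a w-byte value
 * is set, (val >> (w * 8 - 1)) is 1 and ~(1 - 1) = ~0UL fills the
 * result with ones; if it is clear, ~(0 - 1) = ~(~0UL) = 0.
 */
static inline unsigned long sign_fill(unsigned long val, unsigned int w)
{
	return ~((val >> (w * 8 - 1)) - 1);
}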
2397 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2399 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2400 struct decode_cache *c = &ctxt->decode;
2403 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
2404 return emulate_gp(ctxt, 0);
2405 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2406 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2407 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2408 return X86EMUL_CONTINUE;
2411 static int em_mov(struct x86_emulate_ctxt *ctxt)
2413 struct decode_cache *c = &ctxt->decode;
2414 c->dst.val = c->src.val;
2415 return X86EMUL_CONTINUE;
2418 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2420 struct decode_cache *c = &ctxt->decode;
2421 memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
2422 return X86EMUL_CONTINUE;
2425 #define D(_y) { .flags = (_y) }
2427 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2428 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2429 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2430 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2432 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2433 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2435 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2436 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2437 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
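/*
 * For illustration, D6ALU(Lock) expands to the six classic ALU
 * encodings of an opcode row: r/m8,r8 and r/m,r (Lock allowed),
 * r8,r/m8 and r,r/m (Lock stripped), and al,imm8 and ax/eax,imm
 * (Lock stripped), covering both ByteOp and word-size forms.
 */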
2440 static struct opcode group1[] = {
2444 static struct opcode group1A[] = {
2445 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2448 static struct opcode group3[] = {
2449 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2450 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2451 X4(D(SrcMem | ModRM)),
2454 static struct opcode group4[] = {
2455 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2459 static struct opcode group5[] = {
2460 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2461 D(SrcMem | ModRM | Stack),
2462 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2463 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2464 D(SrcMem | ModRM | Stack), N,
2467 static struct group_dual group7 = { {
2468 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2469 D(SrcNone | ModRM | DstMem | Mov), N,
2470 D(SrcMem16 | ModRM | Mov | Priv),
2471 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2473 D(SrcNone | ModRM | Priv | VendorSpecific), N,
2474 N, D(SrcNone | ModRM | Priv | VendorSpecific),
2475 D(SrcNone | ModRM | DstMem | Mov), N,
2476 D(SrcMem16 | ModRM | Mov | Priv), N,
2479 static struct opcode group8[] = {
2481 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2482 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2485 static struct group_dual group9 = { {
2486 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2488 N, N, N, N, N, N, N, N,
2491 static struct opcode group11[] = {
2492 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2495 static struct gprefix pfx_0f_6f_0f_7f = {
2496 N, N, N, I(Sse, em_movdqu),
2499 static struct opcode opcode_table[256] = {
2502 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2505 D(ImplicitOps | Stack | No64), N,
2508 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2511 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2515 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2523 X8(I(SrcReg | Stack, em_push)),
2525 X8(D(DstReg | Stack)),
2527 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2528 N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86/64) */
2531 I(SrcImm | Mov | Stack, em_push),
2532 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2533 I(SrcImmByte | Mov | Stack, em_push),
2534 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2535 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2536 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2540 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2541 G(DstMem | SrcImm | ModRM | Group, group1),
2542 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2543 G(DstMem | SrcImmByte | ModRM | Group, group1),
2544 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2546 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2547 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2548 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2549 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2551 X8(D(SrcAcc | DstReg)),
2553 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2554 I(SrcImmFAddr | No64, em_call_far), N,
2555 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2557 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2558 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2559 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2560 D2bv(SrcSI | DstDI | String),
2562 D2bv(DstAcc | SrcImm),
2563 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2564 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2565 D2bv(SrcAcc | DstDI | String),
2567 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2569 X8(I(DstReg | SrcImm | Mov, em_mov)),
2571 D2bv(DstMem | SrcImmByte | ModRM),
2572 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2573 D(ImplicitOps | Stack),
2574 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2575 G(ByteOp, group11), G(0, group11),
2577 N, N, N, D(ImplicitOps | Stack),
2578 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2580 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2583 N, N, N, N, N, N, N, N,
2586 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2588 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2589 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2590 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2593 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2595 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2596 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2599 static struct opcode twobyte_table[256] = {
2601 N, GD(0, &group7), N, N,
2602 N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
2603 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2604 N, D(ImplicitOps | ModRM), N, N,
2606 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2608 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2609 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2611 N, N, N, N, N, N, N, N,
2613 D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
2614 D(ImplicitOps | Priv), N,
2615 D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
2617 N, N, N, N, N, N, N, N,
2619 X16(D(DstReg | SrcMem | ModRM | Mov)),
2621 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2626 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
2631 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
2635 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
2637 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2638 N, D(DstMem | SrcReg | ModRM | BitOp),
2639 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2640 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2642 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2643 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2644 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2645 D(DstMem | SrcReg | Src2CL | ModRM),
2646 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2648 D2bv(DstMem | SrcReg | ModRM | Lock),
2649 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2650 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2651 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2654 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2655 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2656 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2658 D2bv(DstMem | SrcReg | ModRM | Lock),
2659 N, D(DstMem | SrcReg | ModRM | Mov),
2660 N, N, N, GD(0, &group9),
2661 N, N, N, N, N, N, N, N,
2663 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2665 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2667 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2681 static unsigned imm_size(struct decode_cache *c)
2685 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2691 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2692 unsigned size, bool sign_extension)
2694 struct decode_cache *c = &ctxt->decode;
2695 struct x86_emulate_ops *ops = ctxt->ops;
2696 int rc = X86EMUL_CONTINUE;
2700 op->addr.mem.ea = c->eip;
2701 /* NB. Immediates are sign-extended as necessary. */
2702 switch (op->bytes) {
2704 op->val = insn_fetch(s8, 1, c->eip);
2707 op->val = insn_fetch(s16, 2, c->eip);
2710 op->val = insn_fetch(s32, 4, c->eip);
2713 if (!sign_extension) {
2714 switch (op->bytes) {
2722 op->val &= 0xffffffff;
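/*
 * Illustration (hypothetical value): insn_fetch() above sign-extends,
 * so a 1-byte immediate 0x80 arrives as 0xffffff80; when the opcode
 * wants an unsigned immediate, the masking here brings it back to
 * 0x80 (or, for 4 bytes, truncates to the low 32 bits).
 */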
2731 x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
2733 struct x86_emulate_ops *ops = ctxt->ops;
2734 struct decode_cache *c = &ctxt->decode;
2735 int rc = X86EMUL_CONTINUE;
2736 int mode = ctxt->mode;
2737 int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
2738 bool op_prefix = false;
2739 struct opcode opcode, *g_mod012, *g_mod3;
2740 struct operand memop = { .type = OP_NONE };
2743 c->fetch.start = c->eip;
2744 c->fetch.end = c->fetch.start + insn_len;
2746 memcpy(c->fetch.data, insn, insn_len);
2747 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2750 case X86EMUL_MODE_REAL:
2751 case X86EMUL_MODE_VM86:
2752 case X86EMUL_MODE_PROT16:
2753 def_op_bytes = def_ad_bytes = 2;
2755 case X86EMUL_MODE_PROT32:
2756 def_op_bytes = def_ad_bytes = 4;
2758 #ifdef CONFIG_X86_64
2759 case X86EMUL_MODE_PROT64:
2768 c->op_bytes = def_op_bytes;
2769 c->ad_bytes = def_ad_bytes;
2771 /* Legacy prefixes. */
2773 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2774 case 0x66: /* operand-size override */
2776 /* switch between 2/4 bytes */
2777 c->op_bytes = def_op_bytes ^ 6;
2779 case 0x67: /* address-size override */
2780 if (mode == X86EMUL_MODE_PROT64)
2781 /* switch between 4/8 bytes */
2782 c->ad_bytes = def_ad_bytes ^ 12;
2784 /* switch between 2/4 bytes */
2785 c->ad_bytes = def_ad_bytes ^ 6;
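/*
 * The XOR constants simply flip between the two legal widths:
 * 2 ^ 6 = 4 and 4 ^ 6 = 2 for the 2/4-byte cases, and
 * 4 ^ 12 = 8 and 8 ^ 12 = 4 for the address size in long mode.
 */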
2787 case 0x26: /* ES override */
2788 case 0x2e: /* CS override */
2789 case 0x36: /* SS override */
2790 case 0x3e: /* DS override */
2791 set_seg_override(c, (c->b >> 3) & 3);
2793 case 0x64: /* FS override */
2794 case 0x65: /* GS override */
2795 set_seg_override(c, c->b & 7);
2797 case 0x40 ... 0x4f: /* REX */
2798 if (mode != X86EMUL_MODE_PROT64)
2800 c->rex_prefix = c->b;
2802 case 0xf0: /* LOCK */
2805 case 0xf2: /* REPNE/REPNZ */
2806 case 0xf3: /* REP/REPE/REPZ */
2807 c->rep_prefix = c->b;
2813 /* Any legacy prefix after a REX prefix nullifies its effect. */
2821 if (c->rex_prefix & 8)
2822 c->op_bytes = 8; /* REX.W */
2824 /* Opcode byte(s). */
2825 opcode = opcode_table[c->b];
2826 /* Two-byte opcode? */
2829 c->b = insn_fetch(u8, 1, c->eip);
2830 opcode = twobyte_table[c->b];
2832 c->d = opcode.flags;
2835 dual = c->d & GroupDual;
2836 c->modrm = insn_fetch(u8, 1, c->eip);
2839 if (c->d & GroupDual) {
2840 g_mod012 = opcode.u.gdual->mod012;
2841 g_mod3 = opcode.u.gdual->mod3;
2843 g_mod012 = g_mod3 = opcode.u.group;
2845 c->d &= ~(Group | GroupDual);
2847 goffset = (c->modrm >> 3) & 7;
2849 if ((c->modrm >> 6) == 3)
2850 opcode = g_mod3[goffset];
2852 opcode = g_mod012[goffset];
2853 c->d |= opcode.flags;
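/*
 * Worked example (hypothetical encoding): ModRM 0xd1 is 11 010 001b,
 * i.e. mod = 3, reg = 2, rm = 1. For a Group opcode this selects
 * goffset = 2, and because mod == 3 the g_mod3 variant of the table
 * is used.
 */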
2856 if (c->d & Prefix) {
2857 if (c->rep_prefix && op_prefix)
2858 return X86EMUL_UNHANDLEABLE;
2859 simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
2860 switch (simd_prefix) {
2861 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
2862 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
2863 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
2864 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
2866 c->d |= opcode.flags;
2869 c->execute = opcode.u.execute;
2872 if (c->d == 0 || (c->d & Undefined))
2875 if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
2878 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2881 if (c->d & Op3264) {
2882 if (mode == X86EMUL_MODE_PROT64)
2891 /* ModRM and SIB bytes. */
2893 rc = decode_modrm(ctxt, ops, &memop);
2894 if (!c->has_seg_override)
2895 set_seg_override(c, c->modrm_seg);
2896 } else if (c->d & MemAbs)
2897 rc = decode_abs(ctxt, ops, &memop);
2898 if (rc != X86EMUL_CONTINUE)
2901 if (!c->has_seg_override)
2902 set_seg_override(c, VCPU_SREG_DS);
2904 memop.addr.mem.seg = seg_override(ctxt, ops, c);
2906 if (memop.type == OP_MEM && c->ad_bytes != 8)
2907 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
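/*
 * Example of the truncation above (hypothetical address): with a
 * 32-bit address size, an effective address computed as
 * 0x100000010 wraps to 0x00000010, which the (u32) cast models.
 */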
2909 if (memop.type == OP_MEM && c->rip_relative)
2910 memop.addr.mem.ea += c->eip;
2913 * Decode and fetch the source operand: register, memory
2916 switch (c->d & SrcMask) {
2920 decode_register_operand(ctxt, &c->src, c, 0);
2929 memop.bytes = (c->d & ByteOp) ? 1 :
2935 rc = decode_imm(ctxt, &c->src, 2, false);
2938 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
2941 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
2944 rc = decode_imm(ctxt, &c->src, 1, true);
2947 rc = decode_imm(ctxt, &c->src, 1, false);
2950 c->src.type = OP_REG;
2951 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2952 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2953 fetch_register_operand(&c->src);
2960 c->src.type = OP_MEM;
2961 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2962 c->src.addr.mem.ea =
2963 register_address(c, c->regs[VCPU_REGS_RSI]);
2964 c->src.addr.mem.seg = seg_override(ctxt, ops, c);
2968 c->src.type = OP_IMM;
2969 c->src.addr.mem.ea = c->eip;
2970 c->src.bytes = c->op_bytes + 2;
2971 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2974 memop.bytes = c->op_bytes + 2;
2979 if (rc != X86EMUL_CONTINUE)
2983 * Decode and fetch the second source operand: register, memory
2986 switch (c->d & Src2Mask) {
2991 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
2994 rc = decode_imm(ctxt, &c->src2, 1, true);
3001 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
3005 if (rc != X86EMUL_CONTINUE)
3008 /* Decode and fetch the destination operand: register or memory. */
3009 switch (c->d & DstMask) {
3011 decode_register_operand(ctxt, &c->dst, c,
3012 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
3015 c->dst.type = OP_IMM;
3016 c->dst.addr.mem.ea = c->eip;
3018 c->dst.val = insn_fetch(u8, 1, c->eip);
3023 if ((c->d & DstMask) == DstMem64)
3026 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3028 fetch_bit_operand(c);
3029 c->dst.orig_val = c->dst.val;
3032 c->dst.type = OP_REG;
3033 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3034 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
3035 fetch_register_operand(&c->dst);
3036 c->dst.orig_val = c->dst.val;
3039 c->dst.type = OP_MEM;
3040 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3041 c->dst.addr.mem.ea =
3042 register_address(c, c->regs[VCPU_REGS_RDI]);
3043 c->dst.addr.mem.seg = VCPU_SREG_ES;
3047 /* Special instructions do their own operand decoding. */
3049 c->dst.type = OP_NONE; /* Disable writeback. */
3054 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3057 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3059 struct decode_cache *c = &ctxt->decode;
3061 /* The second termination condition applies only to REPE
3062 * and REPNE. Test whether the repeat string operation prefix is
3063 * REPE/REPZ or REPNE/REPNZ and, if so, check the
3064 * corresponding termination condition:
3065 * - if REPE/REPZ and ZF = 0 then done
3066 * - if REPNE/REPNZ and ZF = 1 then done
3068 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3069 (c->b == 0xae) || (c->b == 0xaf))
3070 && (((c->rep_prefix == REPE_PREFIX) &&
3071 ((ctxt->eflags & EFLG_ZF) == 0))
3072 || ((c->rep_prefix == REPNE_PREFIX) &&
3073 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
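/*
 * Example: "repe cmpsb" keeps iterating while the compared bytes are
 * equal (ZF set) and RCX is nonzero; the first mismatch clears ZF
 * and satisfies the REPE/REPZ case above.
 */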
3080 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3082 struct x86_emulate_ops *ops = ctxt->ops;
3084 struct decode_cache *c = &ctxt->decode;
3085 int rc = X86EMUL_CONTINUE;
3086 int saved_dst_type = c->dst.type;
3087 int irq; /* Used for int 3, int, and into */
3089 ctxt->decode.mem_read.pos = 0;
3091 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3092 rc = emulate_ud(ctxt);
3096 /* LOCK prefix is allowed only with some instructions */
3097 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3098 rc = emulate_ud(ctxt);
3102 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3103 rc = emulate_ud(ctxt);
3108 && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
3109 || !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
3110 rc = emulate_ud(ctxt);
3114 if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
3115 rc = emulate_nm(ctxt);
3119 /* Privileged instructions can be executed only at CPL 0 */
3120 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
3121 rc = emulate_gp(ctxt, 0);
3125 if (c->rep_prefix && (c->d & String)) {
3126 /* All REP prefixes have the same first termination condition */
3127 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3133 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3134 rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
3135 c->src.valptr, c->src.bytes);
3136 if (rc != X86EMUL_CONTINUE)
3138 c->src.orig_val64 = c->src.val64;
3141 if (c->src2.type == OP_MEM) {
3142 rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
3143 &c->src2.val, c->src2.bytes);
3144 if (rc != X86EMUL_CONTINUE)
3148 if ((c->d & DstMask) == ImplicitOps)
3152 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3153 /* optimisation - avoid slow emulated read if Mov */
3154 rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
3155 &c->dst.val, c->dst.bytes);
3156 if (rc != X86EMUL_CONTINUE)
3159 c->dst.orig_val = c->dst.val;
3164 rc = c->execute(ctxt);
3165 if (rc != X86EMUL_CONTINUE)
3176 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3178 case 0x06: /* push es */
3179 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3181 case 0x07: /* pop es */
3182 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3186 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3188 case 0x0e: /* push cs */
3189 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3193 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3195 case 0x16: /* push ss */
3196 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3198 case 0x17: /* pop ss */
3199 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3203 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3205 case 0x1e: /* push ds */
3206 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3208 case 0x1f: /* pop ds */
3209 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3213 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3217 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3221 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3225 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3227 case 0x40 ... 0x47: /* inc r16/r32 */
3228 emulate_1op("inc", c->dst, ctxt->eflags);
3230 case 0x48 ... 0x4f: /* dec r16/r32 */
3231 emulate_1op("dec", c->dst, ctxt->eflags);
3233 case 0x58 ... 0x5f: /* pop reg */
3235 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3237 case 0x60: /* pusha */
3238 rc = emulate_pusha(ctxt, ops);
3240 case 0x61: /* popa */
3241 rc = emulate_popa(ctxt, ops);
3243 case 0x63: /* movsxd */
3244 if (ctxt->mode != X86EMUL_MODE_PROT64)
3245 goto cannot_emulate;
3246 c->dst.val = (s32) c->src.val;
3248 case 0x6c: /* insb */
3249 case 0x6d: /* insw/insd */
3250 c->src.val = c->regs[VCPU_REGS_RDX];
3252 case 0x6e: /* outsb */
3253 case 0x6f: /* outsw/outsd */
3254 c->dst.val = c->regs[VCPU_REGS_RDX];
3257 case 0x70 ... 0x7f: /* jcc (short) */
3258 if (test_cc(c->b, ctxt->eflags))
3259 jmp_rel(c, c->src.val);
3261 case 0x80 ... 0x83: /* Grp1 */
3262 switch (c->modrm_reg) {
3283 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3285 case 0x86 ... 0x87: /* xchg */
3287 /* Write back the register source. */
3288 c->src.val = c->dst.val;
3289 write_register_operand(&c->src);
3291 * Write back the memory destination with implicit LOCK
3294 c->dst.val = c->src.orig_val;
3297 case 0x8c: /* mov r/m, sreg */
3298 if (c->modrm_reg > VCPU_SREG_GS) {
3299 rc = emulate_ud(ctxt);
3302 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3304 case 0x8d: /* lea r16/r32, m */
3305 c->dst.val = c->src.addr.mem.ea;
3307 case 0x8e: { /* mov seg, r/m16 */
3312 if (c->modrm_reg == VCPU_SREG_CS ||
3313 c->modrm_reg > VCPU_SREG_GS) {
3314 rc = emulate_ud(ctxt);
3318 if (c->modrm_reg == VCPU_SREG_SS)
3319 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3321 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3323 c->dst.type = OP_NONE; /* Disable writeback. */
3326 case 0x8f: /* pop (sole member of Grp1a) */
3327 rc = emulate_grp1a(ctxt, ops);
3329 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3330 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3333 case 0x98: /* cbw/cwde/cdqe */
3334 switch (c->op_bytes) {
3335 case 2: c->dst.val = (s8)c->dst.val; break;
3336 case 4: c->dst.val = (s16)c->dst.val; break;
3337 case 8: c->dst.val = (s32)c->dst.val; break;
3340 case 0x9c: /* pushf */
3341 c->src.val = (unsigned long) ctxt->eflags;
3342 emulate_push(ctxt, ops);
3344 case 0x9d: /* popf */
3345 c->dst.type = OP_REG;
3346 c->dst.addr.reg = &ctxt->eflags;
3347 c->dst.bytes = c->op_bytes;
3348 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3350 case 0xa6 ... 0xa7: /* cmps */
3351 c->dst.type = OP_NONE; /* Disable writeback. */
3353 case 0xa8 ... 0xa9: /* test ax, imm */
3355 case 0xae ... 0xaf: /* scas */
3360 case 0xc3: /* ret */
3361 c->dst.type = OP_REG;
3362 c->dst.addr.reg = &c->eip;
3363 c->dst.bytes = c->op_bytes;
3364 goto pop_instruction;
3365 case 0xc4: /* les */
3366 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3368 case 0xc5: /* lds */
3369 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3371 case 0xcb: /* ret far */
3372 rc = emulate_ret_far(ctxt, ops);
3374 case 0xcc: /* int3 */
3377 case 0xcd: /* int n */
3380 rc = emulate_int(ctxt, ops, irq);
3382 case 0xce: /* into */
3383 if (ctxt->eflags & EFLG_OF) {
3388 case 0xcf: /* iret */
3389 rc = emulate_iret(ctxt, ops);
3391 case 0xd0 ... 0xd1: /* Grp2 */
3394 case 0xd2 ... 0xd3: /* Grp2 */
3395 c->src.val = c->regs[VCPU_REGS_RCX];
3398 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3399 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3400 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3401 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3402 jmp_rel(c, c->src.val);
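/*
 * Note on the XOR trick above: 0xe0 ^ 0x5 = 0xe5 and 0xe1 ^ 0x5 =
 * 0xe4, so test_cc() sees condition "ne" for loopnz and "e" for
 * loopz, matching their ZF-based continuation rules; plain loop
 * (0xe2) ignores ZF entirely.
 */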
3404 case 0xe3: /* jcxz/jecxz/jrcxz */
3405 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3406 jmp_rel(c, c->src.val);
3408 case 0xe4: /* inb */
3411 case 0xe6: /* outb */
3412 case 0xe7: /* out */
3414 case 0xe8: /* call (near) */ {
3415 long int rel = c->src.val;
3416 c->src.val = (unsigned long) c->eip;
3418 emulate_push(ctxt, ops);
3421 case 0xe9: /* jmp rel */
3423 case 0xea: { /* jmp far */
3426 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3428 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3432 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3436 jmp: /* jmp rel short */
3437 jmp_rel(c, c->src.val);
3438 c->dst.type = OP_NONE; /* Disable writeback. */
3440 case 0xec: /* in al,dx */
3441 case 0xed: /* in (e/r)ax,dx */
3442 c->src.val = c->regs[VCPU_REGS_RDX];
3444 c->dst.bytes = min(c->dst.bytes, 4u);
3445 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3446 rc = emulate_gp(ctxt, 0);
3449 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3451 goto done; /* IO is needed */
3453 case 0xee: /* out dx,al */
3454 case 0xef: /* out dx,(e/r)ax */
3455 c->dst.val = c->regs[VCPU_REGS_RDX];
3457 c->src.bytes = min(c->src.bytes, 4u);
3458 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3460 rc = emulate_gp(ctxt, 0);
3463 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3464 &c->src.val, 1, ctxt->vcpu);
3465 c->dst.type = OP_NONE; /* Disable writeback. */
3467 case 0xf4: /* hlt */
3468 ctxt->vcpu->arch.halt_request = 1;
3470 case 0xf5: /* cmc */
3471 /* complement carry flag from eflags reg */
3472 ctxt->eflags ^= EFLG_CF;
3474 case 0xf6 ... 0xf7: /* Grp3 */
3475 rc = emulate_grp3(ctxt, ops);
3477 case 0xf8: /* clc */
3478 ctxt->eflags &= ~EFLG_CF;
3480 case 0xf9: /* stc */
3481 ctxt->eflags |= EFLG_CF;
3483 case 0xfa: /* cli */
3484 if (emulator_bad_iopl(ctxt, ops)) {
3485 rc = emulate_gp(ctxt, 0);
3488 ctxt->eflags &= ~X86_EFLAGS_IF;
3490 case 0xfb: /* sti */
3491 if (emulator_bad_iopl(ctxt, ops)) {
3492 rc = emulate_gp(ctxt, 0);
3495 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3496 ctxt->eflags |= X86_EFLAGS_IF;
3499 case 0xfc: /* cld */
3500 ctxt->eflags &= ~EFLG_DF;
3502 case 0xfd: /* std */
3503 ctxt->eflags |= EFLG_DF;
3505 case 0xfe: /* Grp4 */
3507 rc = emulate_grp45(ctxt, ops);
3509 case 0xff: /* Grp5 */
3510 if (c->modrm_reg == 5)
3514 goto cannot_emulate;
3517 if (rc != X86EMUL_CONTINUE)
3521 rc = writeback(ctxt, ops);
3522 if (rc != X86EMUL_CONTINUE)
3526 * restore dst type in case the decoding is reused
3527 * (happens for string instructions)
3529 c->dst.type = saved_dst_type;
3531 if ((c->d & SrcMask) == SrcSI)
3532 string_addr_inc(ctxt, seg_override(ctxt, ops, c),
3533 VCPU_REGS_RSI, &c->src);
3535 if ((c->d & DstMask) == DstDI)
3536 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3539 if (c->rep_prefix && (c->d & String)) {
3540 struct read_cache *r = &ctxt->decode.io_read;
3541 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3543 if (!string_insn_completed(ctxt)) {
3545 * Re-enter guest when the PIO read-ahead buffer is empty
3546 * or, if it is not used, after every 1024 iterations.
3548 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3549 (r->end == 0 || r->end != r->pos)) {
3551 * Reset read cache. Usually happens before
3552 * decode, but since the instruction is restarted
3553 * we have to do it here.
3555 ctxt->decode.mem_read.end = 0;
3556 return EMULATION_RESTART;
3558 goto done; /* skip rip writeback */
3565 if (rc == X86EMUL_PROPAGATE_FAULT)
3566 ctxt->have_exception = true;
3567 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3571 case 0x01: /* lgdt, lidt, lmsw */
3572 switch (c->modrm_reg) {
3574 unsigned long address;
3576 case 0: /* vmcall */
3577 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3578 goto cannot_emulate;
3580 rc = kvm_fix_hypercall(ctxt->vcpu);
3581 if (rc != X86EMUL_CONTINUE)
3584 /* Let the processor re-execute the fixed hypercall */
3586 /* Disable writeback. */
3587 c->dst.type = OP_NONE;
3590 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3591 &size, &address, c->op_bytes);
3592 if (rc != X86EMUL_CONTINUE)
3594 realmode_lgdt(ctxt->vcpu, size, address);
3595 /* Disable writeback. */
3596 c->dst.type = OP_NONE;
3598 case 3: /* lidt/vmmcall */
3599 if (c->modrm_mod == 3) {
3600 switch (c->modrm_rm) {
3602 rc = kvm_fix_hypercall(ctxt->vcpu);
3605 goto cannot_emulate;
3608 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3611 if (rc != X86EMUL_CONTINUE)
3613 realmode_lidt(ctxt->vcpu, size, address);
3615 /* Disable writeback. */
3616 c->dst.type = OP_NONE;
3620 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3623 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3624 (c->src.val & 0x0f), ctxt->vcpu);
3625 c->dst.type = OP_NONE;
3627 case 5: /* not defined */
3629 rc = X86EMUL_PROPAGATE_FAULT;
3632 emulate_invlpg(ctxt->vcpu,
3633 linear(ctxt, c->src.addr.mem));
3634 /* Disable writeback. */
3635 c->dst.type = OP_NONE;
3638 goto cannot_emulate;
3641 case 0x05: /* syscall */
3642 rc = emulate_syscall(ctxt, ops);
3645 emulate_clts(ctxt->vcpu);
3647 case 0x09: /* wbinvd */
3648 kvm_emulate_wbinvd(ctxt->vcpu);
3650 case 0x08: /* invd */
3651 case 0x0d: /* GrpP (prefetch) */
3652 case 0x18: /* Grp16 (prefetch/nop) */
3654 case 0x20: /* mov cr, reg */
3655 switch (c->modrm_reg) {
3660 rc = X86EMUL_PROPAGATE_FAULT;
3663 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3665 case 0x21: /* mov from dr to reg */
3666 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3667 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3669 rc = X86EMUL_PROPAGATE_FAULT;
3672 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3674 case 0x22: /* mov reg, cr */
3675 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3676 emulate_gp(ctxt, 0);
3677 rc = X86EMUL_PROPAGATE_FAULT;
3680 c->dst.type = OP_NONE;
3682 case 0x23: /* mov from reg to dr */
3683 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3684 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3686 rc = X86EMUL_PROPAGATE_FAULT;
3690 if (ops->set_dr(c->modrm_reg, c->src.val &
3691 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3692 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3693 /* #UD condition is already handled by the code above */
3694 emulate_gp(ctxt, 0);
3695 rc = X86EMUL_PROPAGATE_FAULT;
3699 c->dst.type = OP_NONE; /* no writeback */
3703 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3704 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3705 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3706 emulate_gp(ctxt, 0);
3707 rc = X86EMUL_PROPAGATE_FAULT;
3710 rc = X86EMUL_CONTINUE;
3714 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3715 emulate_gp(ctxt, 0);
3716 rc = X86EMUL_PROPAGATE_FAULT;
3719 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3720 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3722 rc = X86EMUL_CONTINUE;
3724 case 0x34: /* sysenter */
3725 rc = emulate_sysenter(ctxt, ops);
3727 case 0x35: /* sysexit */
3728 rc = emulate_sysexit(ctxt, ops);
3730 case 0x40 ... 0x4f: /* cmov */
3731 c->dst.val = c->dst.orig_val = c->src.val;
3732 if (!test_cc(c->b, ctxt->eflags))
3733 c->dst.type = OP_NONE; /* no writeback */
3735 case 0x80 ... 0x8f: /* jnz rel, etc */
3736 if (test_cc(c->b, ctxt->eflags))
3737 jmp_rel(c, c->src.val);
3739 case 0x90 ... 0x9f: /* setcc r/m8 */
3740 c->dst.val = test_cc(c->b, ctxt->eflags);
3742 case 0xa0: /* push fs */
3743 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3745 case 0xa1: /* pop fs */
3746 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3750 c->dst.type = OP_NONE;
3751 /* only subword offset */
3752 c->src.val &= (c->dst.bytes << 3) - 1;
3753 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3755 case 0xa4: /* shld imm8, r, r/m */
3756 case 0xa5: /* shld cl, r, r/m */
3757 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3759 case 0xa8: /* push gs */
3760 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3762 case 0xa9: /* pop gs */
3763 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3767 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3769 case 0xac: /* shrd imm8, r, r/m */
3770 case 0xad: /* shrd cl, r, r/m */
3771 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3773 case 0xae: /* clflush */
3775 case 0xb0 ... 0xb1: /* cmpxchg */
3777 * Save real source value, then compare EAX against
3780 c->src.orig_val = c->src.val;
3781 c->src.val = c->regs[VCPU_REGS_RAX];
3782 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3783 if (ctxt->eflags & EFLG_ZF) {
3784 /* Success: write back to memory. */
3785 c->dst.val = c->src.orig_val;
3787 /* Failure: write the value we saw to EAX. */
3788 c->dst.type = OP_REG;
3789 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
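/*
 * Example (hypothetical values): with AL = 0x10, "cmpxchg %bl, (mem)"
 * where (mem) also holds 0x10 sets ZF and stores BL to memory; had
 * (mem) differed, ZF would be cleared and the old memory value
 * written back to AL instead.
 */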
3792 case 0xb2: /* lss */
3793 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
3797 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3799 case 0xb4: /* lfs */
3800 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
3802 case 0xb5: /* lgs */
3803 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
3805 case 0xb6 ... 0xb7: /* movzx */
3806 c->dst.bytes = c->op_bytes;
3807 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3810 case 0xba: /* Grp8 */
3811 switch (c->modrm_reg & 3) {
3824 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3826 case 0xbc: { /* bsf */
3828 __asm__ ("bsf %2, %0; setz %1"
3829 : "=r"(c->dst.val), "=q"(zf)
3831 ctxt->eflags &= ~X86_EFLAGS_ZF;
3833 ctxt->eflags |= X86_EFLAGS_ZF;
3834 c->dst.type = OP_NONE; /* Disable writeback. */
3838 case 0xbd: { /* bsr */
3840 __asm__ ("bsr %2, %0; setz %1"
3841 : "=r"(c->dst.val), "=q"(zf)
3843 ctxt->eflags &= ~X86_EFLAGS_ZF;
3845 ctxt->eflags |= X86_EFLAGS_ZF;
3846 c->dst.type = OP_NONE; /* Disable writeback. */
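/*
 * Both bsf and bsr leave the destination undefined and set ZF when
 * the source is zero; the "setz" in the asm above captures that so
 * the guest-visible EFLAGS.ZF can be updated to match.
 */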
3850 case 0xbe ... 0xbf: /* movsx */
3851 c->dst.bytes = c->op_bytes;
3852 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3855 case 0xc0 ... 0xc1: /* xadd */
3856 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3857 /* Write back the register source. */
3858 c->src.val = c->dst.orig_val;
3859 write_register_operand(&c->src);
3861 case 0xc3: /* movnti */
3862 c->dst.bytes = c->op_bytes;
3863 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3866 case 0xc7: /* Grp9 (cmpxchg8b) */
3867 rc = emulate_grp9(ctxt, ops);
3870 goto cannot_emulate;
3873 if (rc != X86EMUL_CONTINUE)