1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
32 * Opcode effective-address decode tables.
33 * Note that we only emulate instructions that have at least one memory
34 * operand (excluding implicit stack references). We assume that stack
35 * references and instruction fetches will never occur in special memory
36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
37 * not be handled.
40 /* Operand sizes: 8-bit operands or specified/overridden size. */
41 #define ByteOp (1<<0) /* 8-bit operands. */
42 /* Destination operand type. */
43 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
44 #define DstReg (2<<1) /* Register operand. */
45 #define DstMem (3<<1) /* Memory operand. */
46 #define DstAcc (4<<1) /* Destination Accumulator */
47 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
48 #define DstMem64 (6<<1) /* 64bit memory operand */
49 #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50 #define DstMask (7<<1)
51 /* Source operand type. */
52 #define SrcNone (0<<4) /* No source operand. */
53 #define SrcReg (1<<4) /* Register operand. */
54 #define SrcMem (2<<4) /* Memory operand. */
55 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
56 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
57 #define SrcImm (5<<4) /* Immediate operand. */
58 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
59 #define SrcOne (7<<4) /* Implied '1' */
60 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
61 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
62 #define SrcSI (0xa<<4) /* Source is in DS:(E)SI */
63 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
64 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
65 #define SrcAcc (0xd<<4) /* Source Accumulator */
66 #define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
67 #define SrcMask (0xf<<4)
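/*
 * Illustrative sketch (not part of the upstream source): the Dst* and
 * Src* fields above are packed into one set of decode flags, so an
 * opcode table entry can be classified with a mask-and-compare:
 *
 *	u32 d = DstReg | SrcMem;
 *	if ((d & DstMask) == DstReg)	... decode a register destination
 *	if ((d & SrcMask) == SrcMem)	... decode a memory source
 */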
68 /* Generic ModRM decode. */
70 /* Destination is only written; never read. */
73 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
74 #define String (1<<12) /* String instruction (rep capable) */
75 #define Stack (1<<13) /* Stack instruction (push/pop) */
76 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
77 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
78 #define Prefix (1<<16) /* Instruction varies with 66/f2/f3 prefix */
79 #define Sse (1<<17) /* SSE Vector instruction */
80 #define RMExt (1<<18) /* Opcode extension in ModRM r/m if mod == 3 */
82 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
83 #define VendorSpecific (1<<22) /* Vendor specific instruction */
84 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
85 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
86 #define Undefined (1<<25) /* No Such Instruction */
87 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
88 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
90 /* Source 2 operand type */
91 #define Src2None (0<<29)
92 #define Src2CL (1<<29)
93 #define Src2ImmByte (2<<29)
94 #define Src2One (3<<29)
95 #define Src2Imm (4<<29)
96 #define Src2Mask (7<<29)
99 #define X3(x...) X2(x), x
100 #define X4(x...) X2(x), X2(x)
101 #define X5(x...) X4(x), x
102 #define X6(x...) X4(x), X2(x)
103 #define X7(x...) X4(x), X3(x)
104 #define X8(x...) X4(x), X4(x)
105 #define X16(x...) X8(x), X8(x)
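/*
 * Illustrative expansion, assuming the elided X2(x...) is "x, x":
 * X16(entry) emits sixteen copies of its argument, so a whole row of
 * the opcode table (e.g. the 0x40-0x4f inc/dec range) can be written
 * as a single X16(...) line.
 */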
111 int (*execute)(struct x86_emulate_ctxt *ctxt);
112 struct opcode *group;
113 struct group_dual *gdual;
114 struct gprefix *gprefix;
116 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
120 struct opcode mod012[8];
121 struct opcode mod3[8];
125 struct opcode pfx_no;
126 struct opcode pfx_66;
127 struct opcode pfx_f2;
128 struct opcode pfx_f3;
131 /* EFLAGS bit definitions. */
132 #define EFLG_ID (1<<21)
133 #define EFLG_VIP (1<<20)
134 #define EFLG_VIF (1<<19)
135 #define EFLG_AC (1<<18)
136 #define EFLG_VM (1<<17)
137 #define EFLG_RF (1<<16)
138 #define EFLG_IOPL (3<<12)
139 #define EFLG_NT (1<<14)
140 #define EFLG_OF (1<<11)
141 #define EFLG_DF (1<<10)
142 #define EFLG_IF (1<<9)
143 #define EFLG_TF (1<<8)
144 #define EFLG_SF (1<<7)
145 #define EFLG_ZF (1<<6)
146 #define EFLG_AF (1<<4)
147 #define EFLG_PF (1<<2)
148 #define EFLG_CF (1<<0)
150 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
151 #define EFLG_RESERVED_ONE_MASK 2
154 * Instruction emulation:
155 * Most instructions are emulated directly via a fragment of inline assembly
156 * code. This allows us to save/restore EFLAGS and thus very easily pick up
157 * any modified flags.
160 #if defined(CONFIG_X86_64)
161 #define _LO32 "k" /* force 32-bit operand */
162 #define _STK "%%rsp" /* stack pointer */
163 #elif defined(__i386__)
164 #define _LO32 "" /* force 32-bit operand */
165 #define _STK "%%esp" /* stack pointer */
169 * These EFLAGS bits are restored from saved value during emulation, and
170 * any changes are written back to the saved value after emulation.
172 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
174 /* Before executing instruction: restore necessary bits in EFLAGS. */
175 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
176 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
177 "movl %"_sav",%"_LO32 _tmp"; " \
180 "movl %"_msk",%"_LO32 _tmp"; " \
181 "andl %"_LO32 _tmp",("_STK"); " \
183 "notl %"_LO32 _tmp"; " \
184 "andl %"_LO32 _tmp",("_STK"); " \
185 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
187 "orl %"_LO32 _tmp",("_STK"); " \
191 /* After executing instruction: write-back necessary bits in EFLAGS. */
192 #define _POST_EFLAGS(_sav, _msk, _tmp) \
193 /* _sav |= EFLAGS & _msk; */ \
196 "andl %"_msk",%"_LO32 _tmp"; " \
197 "orl %"_LO32 _tmp",%"_sav"; "
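/*
 * Rough C equivalent of the _PRE_/_POST_EFLAGS pair (sketch only; the
 * real sequence must be inline asm so the host flags are live while
 * the emulated opcode executes):
 *
 *	eflags = (saved & EFLAGS_MASK) | (eflags & ~EFLAGS_MASK);
 *	...emulated instruction...
 *	saved  = (saved & ~EFLAGS_MASK) | (eflags & EFLAGS_MASK);
 */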
205 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
207 __asm__ __volatile__ ( \
208 _PRE_EFLAGS("0", "4", "2") \
209 _op _suffix " %"_x"3,%1; " \
210 _POST_EFLAGS("0", "4", "2") \
211 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
213 : _y ((_src).val), "i" (EFLAGS_MASK)); \
217 /* Raw emulation: instruction has two explicit operands. */
218 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
220 unsigned long _tmp; \
222 switch ((_dst).bytes) { \
224 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
227 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
230 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
235 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
237 unsigned long _tmp; \
238 switch ((_dst).bytes) { \
240 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
243 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
244 _wx, _wy, _lx, _ly, _qx, _qy); \
249 /* Source operand is byte-sized and may be restricted to just %cl. */
250 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
251 __emulate_2op(_op, _src, _dst, _eflags, \
252 "b", "c", "b", "c", "b", "c", "b", "c")
254 /* Source operand is byte, word, long or quad sized. */
255 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
256 __emulate_2op(_op, _src, _dst, _eflags, \
257 "b", "q", "w", "r", _LO32, "r", "", "r")
259 /* Source operand is word, long or quad sized. */
260 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
261 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
262 "w", "r", _LO32, "r", "", "r")
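/*
 * Usage sketch: with c = &ctxt->decode, a decoded ALU instruction is
 * emulated in a single line; the operand width is taken from
 * (_dst).bytes, so the same call handles the b/w/l/q variants:
 *
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 */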
264 /* Instruction has three operands and one operand is stored in the ECX register */
265 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
267 unsigned long _tmp; \
268 _type _clv = (_cl).val; \
269 _type _srcv = (_src).val; \
270 _type _dstv = (_dst).val; \
272 __asm__ __volatile__ ( \
273 _PRE_EFLAGS("0", "5", "2") \
274 _op _suffix " %4,%1 \n" \
275 _POST_EFLAGS("0", "5", "2") \
276 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
277 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
280 (_cl).val = (unsigned long) _clv; \
281 (_src).val = (unsigned long) _srcv; \
282 (_dst).val = (unsigned long) _dstv; \
285 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
287 switch ((_dst).bytes) { \
289 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
290 "w", unsigned short); \
293 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
294 "l", unsigned int); \
297 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
298 "q", unsigned long)); \
303 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
305 unsigned long _tmp; \
307 __asm__ __volatile__ ( \
308 _PRE_EFLAGS("0", "3", "2") \
309 _op _suffix " %1; " \
310 _POST_EFLAGS("0", "3", "2") \
311 : "=m" (_eflags), "+m" ((_dst).val), \
313 : "i" (EFLAGS_MASK)); \
316 /* Instruction has only one explicit operand (no source operand). */
317 #define emulate_1op(_op, _dst, _eflags) \
319 switch ((_dst).bytes) { \
320 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
321 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
322 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
323 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
327 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
329 unsigned long _tmp; \
331 __asm__ __volatile__ ( \
332 _PRE_EFLAGS("0", "4", "1") \
333 _op _suffix " %5; " \
334 _POST_EFLAGS("0", "4", "1") \
335 : "=m" (_eflags), "=&r" (_tmp), \
336 "+a" (_rax), "+d" (_rdx) \
337 : "i" (EFLAGS_MASK), "m" ((_src).val), \
338 "a" (_rax), "d" (_rdx)); \
341 #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
343 unsigned long _tmp; \
345 __asm__ __volatile__ ( \
346 _PRE_EFLAGS("0", "5", "1") \
348 _op _suffix " %6; " \
350 _POST_EFLAGS("0", "5", "1") \
351 ".pushsection .fixup,\"ax\" \n\t" \
352 "3: movb $1, %4 \n\t" \
355 _ASM_EXTABLE(1b, 3b) \
356 : "=m" (_eflags), "=&r" (_tmp), \
357 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
358 : "i" (EFLAGS_MASK), "m" ((_src).val), \
359 "a" (_rax), "d" (_rdx)); \
362 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
363 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
365 switch((_src).bytes) { \
367 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
371 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
375 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
379 ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
385 #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
387 switch((_src).bytes) { \
389 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
390 _eflags, "b", _ex); \
393 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
394 _eflags, "w", _ex); \
397 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
398 _eflags, "l", _ex); \
401 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
402 _eflags, "q", _ex)); \
407 /* Fetch next part of the instruction being emulated. */
408 #define insn_fetch(_type, _size, _eip) \
409 ({ unsigned long _x; \
410 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
411 if (rc != X86EMUL_CONTINUE) \
417 #define insn_fetch_arr(_arr, _size, _eip) \
418 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
419 if (rc != X86EMUL_CONTINUE) \
424 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
425 enum x86_intercept intercept,
426 enum x86_intercept_stage stage)
428 struct x86_instruction_info info = {
429 .intercept = intercept,
430 .rep_prefix = ctxt->decode.rep_prefix,
431 .modrm_mod = ctxt->decode.modrm_mod,
432 .modrm_reg = ctxt->decode.modrm_reg,
433 .modrm_rm = ctxt->decode.modrm_rm,
434 .src_val = ctxt->decode.src.val64,
435 .src_bytes = ctxt->decode.src.bytes,
436 .dst_bytes = ctxt->decode.dst.bytes,
437 .ad_bytes = ctxt->decode.ad_bytes,
438 .next_rip = ctxt->eip,
441 return ctxt->ops->intercept(ctxt, &info, stage);
444 static inline unsigned long ad_mask(struct decode_cache *c)
446 return (1UL << (c->ad_bytes << 3)) - 1;
449 /* Access/update address held in a register, based on addressing mode. */
450 static inline unsigned long
451 address_mask(struct decode_cache *c, unsigned long reg)
453 if (c->ad_bytes == sizeof(unsigned long))
456 return reg & ad_mask(c);
459 static inline unsigned long
460 register_address(struct decode_cache *c, unsigned long reg)
462 return address_mask(c, reg);
466 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
468 if (c->ad_bytes == sizeof(unsigned long))
471 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
474 static inline void jmp_rel(struct decode_cache *c, int rel)
476 register_address_increment(c, &c->eip, rel);
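/*
 * Worked example: with ad_bytes == 2, ad_mask() is 0xffff, so a
 * relative jump wraps inside the 16-bit IP just as real-mode hardware
 * does: eip 0xffff + rel 3 -> (0xffff + 3) & 0xffff == 0x0002, while
 * the bits above the mask are left untouched.
 */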
479 static u32 desc_limit_scaled(struct desc_struct *desc)
481 u32 limit = get_desc_limit(desc);
483 return desc->g ? (limit << 12) | 0xfff : limit;
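/*
 * Example: a descriptor with limit 0xfffff and G=1 scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, the flat 4GB segment; with
 * G=0 the limit field is a byte count and is used as-is.
 */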
486 static void set_seg_override(struct decode_cache *c, int seg)
488 c->has_seg_override = true;
489 c->seg_override = seg;
492 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
493 struct x86_emulate_ops *ops, int seg)
495 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
498 return ops->get_cached_segment_base(ctxt, seg);
501 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
502 struct x86_emulate_ops *ops,
503 struct decode_cache *c)
505 if (!c->has_seg_override)
508 return c->seg_override;
511 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
512 u32 error, bool valid)
514 ctxt->exception.vector = vec;
515 ctxt->exception.error_code = error;
516 ctxt->exception.error_code_valid = valid;
517 return X86EMUL_PROPAGATE_FAULT;
520 static int emulate_db(struct x86_emulate_ctxt *ctxt)
522 return emulate_exception(ctxt, DB_VECTOR, 0, false);
525 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
527 return emulate_exception(ctxt, GP_VECTOR, err, true);
530 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
532 return emulate_exception(ctxt, SS_VECTOR, err, true);
535 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
537 return emulate_exception(ctxt, UD_VECTOR, 0, false);
540 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
542 return emulate_exception(ctxt, TS_VECTOR, err, true);
545 static int emulate_de(struct x86_emulate_ctxt *ctxt)
547 return emulate_exception(ctxt, DE_VECTOR, 0, false);
550 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
552 return emulate_exception(ctxt, NM_VECTOR, 0, false);
555 static int __linearize(struct x86_emulate_ctxt *ctxt,
556 struct segmented_address addr,
557 unsigned size, bool write, bool fetch,
560 struct decode_cache *c = &ctxt->decode;
561 struct desc_struct desc;
567 la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
568 switch (ctxt->mode) {
569 case X86EMUL_MODE_REAL:
571 case X86EMUL_MODE_PROT64:
572 if (((signed long)la << 16) >> 16 != la)
573 return emulate_gp(ctxt, 0);
576 usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL,
580 /* code segment or read-only data segment */
581 if (((desc.type & 8) || !(desc.type & 2)) && write)
583 /* unreadable code segment */
584 if (!fetch && (desc.type & 8) && !(desc.type & 2))
586 lim = desc_limit_scaled(&desc);
587 if ((desc.type & 8) || !(desc.type & 4)) {
588 /* expand-up segment */
589 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
592 /* expand-down segment */
593 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
595 lim = desc.d ? 0xffffffff : 0xffff;
596 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
599 cpl = ctxt->ops->cpl(ctxt);
600 rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
602 if (!(desc.type & 8)) {
606 } else if ((desc.type & 8) && !(desc.type & 4)) {
607 /* nonconforming code segment */
610 } else if ((desc.type & 8) && (desc.type & 4)) {
611 /* conforming code segment */
617 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
620 return X86EMUL_CONTINUE;
622 if (addr.seg == VCPU_SREG_SS)
623 return emulate_ss(ctxt, addr.seg);
625 return emulate_gp(ctxt, addr.seg);
628 static int linearize(struct x86_emulate_ctxt *ctxt,
629 struct segmented_address addr,
630 unsigned size, bool write,
633 return __linearize(ctxt, addr, size, write, false, linear);
637 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
638 struct segmented_address addr,
645 rc = linearize(ctxt, addr, size, false, &linear);
646 if (rc != X86EMUL_CONTINUE)
648 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
651 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
652 struct x86_emulate_ops *ops,
653 unsigned long eip, u8 *dest)
655 struct fetch_cache *fc = &ctxt->decode.fetch;
659 if (eip == fc->end) {
660 unsigned long linear;
661 struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
662 cur_size = fc->end - fc->start;
663 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
664 rc = __linearize(ctxt, addr, size, false, true, &linear);
665 if (rc != X86EMUL_CONTINUE)
667 rc = ops->fetch(ctxt, linear, fc->data + cur_size,
668 size, &ctxt->exception);
669 if (rc != X86EMUL_CONTINUE)
673 *dest = fc->data[eip - fc->start];
674 return X86EMUL_CONTINUE;
677 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
678 struct x86_emulate_ops *ops,
679 unsigned long eip, void *dest, unsigned size)
683 /* x86 instructions are limited to 15 bytes. */
684 if (eip + size - ctxt->eip > 15)
685 return X86EMUL_UNHANDLEABLE;
687 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
688 if (rc != X86EMUL_CONTINUE)
691 return X86EMUL_CONTINUE;
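/*
 * Example: if decode started at ctxt->eip == 0x1000 and 11 bytes are
 * already consumed, fetching 8 more gives eip + size - ctxt->eip ==
 * 19 > 15, so the fetch fails instead of over-reading the
 * architectural 15-byte instruction-length limit.
 */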
695 * Given the 'reg' portion of a ModRM byte, and a register block, return a
696 * pointer into the block that addresses the relevant register.
697 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
699 static void *decode_register(u8 modrm_reg, unsigned long *regs,
704 p = &regs[modrm_reg];
705 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
706 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
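/*
 * Example: without a REX prefix, modrm_reg == 4 selects AH; since
 * regs[VCPU_REGS_RAX] holds AL in its low byte (little endian),
 * &regs[4 & 3] + 1 == &regs[VCPU_REGS_RAX] + 1 points at AH.
 */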
710 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
711 struct x86_emulate_ops *ops,
712 struct segmented_address addr,
713 u16 *size, unsigned long *address, int op_bytes)
720 rc = segmented_read_std(ctxt, addr, size, 2);
721 if (rc != X86EMUL_CONTINUE)
724 rc = segmented_read_std(ctxt, addr, address, op_bytes);
728 static int test_cc(unsigned int condition, unsigned int flags)
732 switch ((condition & 15) >> 1) {
734 rc |= (flags & EFLG_OF);
736 case 1: /* b/c/nae */
737 rc |= (flags & EFLG_CF);
740 rc |= (flags & EFLG_ZF);
743 rc |= (flags & (EFLG_CF|EFLG_ZF));
746 rc |= (flags & EFLG_SF);
749 rc |= (flags & EFLG_PF);
752 rc |= (flags & EFLG_ZF);
755 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
759 /* Odd condition identifiers (lsb == 1) have inverted sense. */
760 return (!!rc ^ (condition & 1));
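/*
 * Worked example: condition 0x4 (je/jz) has (condition & 15) >> 1 ==
 * 2, so rc = flags & EFLG_ZF; condition 0x5 (jne/jnz) lands in the
 * same case, but its set lsb inverts the sense to !ZF.
 */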
763 static void fetch_register_operand(struct operand *op)
767 op->val = *(u8 *)op->addr.reg;
770 op->val = *(u16 *)op->addr.reg;
773 op->val = *(u32 *)op->addr.reg;
776 op->val = *(u64 *)op->addr.reg;
781 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
783 ctxt->ops->get_fpu(ctxt);
785 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
786 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
787 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
788 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
789 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
790 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
791 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
792 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
794 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
795 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
796 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
797 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
798 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
799 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
800 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
801 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
805 ctxt->ops->put_fpu(ctxt);
808 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
811 ctxt->ops->get_fpu(ctxt);
813 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
814 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
815 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
816 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
817 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
818 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
819 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
820 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
822 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
823 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
824 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
825 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
826 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
827 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
828 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
829 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
833 ctxt->ops->put_fpu(ctxt);
836 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
838 struct decode_cache *c,
841 unsigned reg = c->modrm_reg;
842 int highbyte_regs = c->rex_prefix == 0;
845 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
851 read_sse_reg(ctxt, &op->vec_val, reg);
856 if ((c->d & ByteOp) && !inhibit_bytereg) {
857 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
860 op->addr.reg = decode_register(reg, c->regs, 0);
861 op->bytes = c->op_bytes;
863 fetch_register_operand(op);
864 op->orig_val = op->val;
867 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
868 struct x86_emulate_ops *ops,
871 struct decode_cache *c = &ctxt->decode;
873 int index_reg = 0, base_reg = 0, scale;
874 int rc = X86EMUL_CONTINUE;
878 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
879 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
880 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
883 c->modrm = insn_fetch(u8, 1, c->eip);
884 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
885 c->modrm_reg |= (c->modrm & 0x38) >> 3;
886 c->modrm_rm |= (c->modrm & 0x07);
887 c->modrm_seg = VCPU_SREG_DS;
889 if (c->modrm_mod == 3) {
891 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
892 op->addr.reg = decode_register(c->modrm_rm,
893 c->regs, c->d & ByteOp);
897 op->addr.xmm = c->modrm_rm;
898 read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
901 fetch_register_operand(op);
907 if (c->ad_bytes == 2) {
908 unsigned bx = c->regs[VCPU_REGS_RBX];
909 unsigned bp = c->regs[VCPU_REGS_RBP];
910 unsigned si = c->regs[VCPU_REGS_RSI];
911 unsigned di = c->regs[VCPU_REGS_RDI];
913 /* 16-bit ModR/M decode. */
914 switch (c->modrm_mod) {
916 if (c->modrm_rm == 6)
917 modrm_ea += insn_fetch(u16, 2, c->eip);
920 modrm_ea += insn_fetch(s8, 1, c->eip);
923 modrm_ea += insn_fetch(u16, 2, c->eip);
926 switch (c->modrm_rm) {
946 if (c->modrm_mod != 0)
953 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
954 (c->modrm_rm == 6 && c->modrm_mod != 0))
955 c->modrm_seg = VCPU_SREG_SS;
956 modrm_ea = (u16)modrm_ea;
958 /* 32/64-bit ModR/M decode. */
959 if ((c->modrm_rm & 7) == 4) {
960 sib = insn_fetch(u8, 1, c->eip);
961 index_reg |= (sib >> 3) & 7;
965 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
966 modrm_ea += insn_fetch(s32, 4, c->eip);
968 modrm_ea += c->regs[base_reg];
970 modrm_ea += c->regs[index_reg] << scale;
971 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
972 if (ctxt->mode == X86EMUL_MODE_PROT64)
975 modrm_ea += c->regs[c->modrm_rm];
976 switch (c->modrm_mod) {
978 if (c->modrm_rm == 5)
979 modrm_ea += insn_fetch(s32, 4, c->eip);
982 modrm_ea += insn_fetch(s8, 1, c->eip);
985 modrm_ea += insn_fetch(s32, 4, c->eip);
989 op->addr.mem.ea = modrm_ea;
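/*
 * Worked example (32-bit, mod != 3): ModRM 0x44 is mod 01, reg 0,
 * rm 4, so a SIB byte follows; SIB 0x58 is scale 1, index 3 (EBX),
 * base 0 (EAX), and mod 01 adds a disp8, giving an effective address
 * of EAX + (EBX << 1) + disp8 accumulated in modrm_ea.
 */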
994 static int decode_abs(struct x86_emulate_ctxt *ctxt,
995 struct x86_emulate_ops *ops,
998 struct decode_cache *c = &ctxt->decode;
999 int rc = X86EMUL_CONTINUE;
1002 switch (c->ad_bytes) {
1004 op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
1007 op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
1010 op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
1017 static void fetch_bit_operand(struct decode_cache *c)
1021 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
1022 mask = ~(c->dst.bytes * 8 - 1);
1024 if (c->src.bytes == 2)
1025 sv = (s16)c->src.val & (s16)mask;
1026 else if (c->src.bytes == 4)
1027 sv = (s32)c->src.val & (s32)mask;
1029 c->dst.addr.mem.ea += (sv >> 3);
1032 /* only subword offset */
1033 c->src.val &= (c->dst.bytes << 3) - 1;
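/*
 * Example: "bt [mem], reg" with a 4-byte destination and c->src.val
 * == 35: mask == ~31, sv == 35 & ~31 == 32, so the memory address
 * advances by 32 >> 3 == 4 bytes and the remaining in-word bit
 * offset is 35 & 31 == 3.
 */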
1036 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1037 struct x86_emulate_ops *ops,
1038 unsigned long addr, void *dest, unsigned size)
1041 struct read_cache *mc = &ctxt->decode.mem_read;
1044 int n = min(size, 8u);
1046 if (mc->pos < mc->end)
1049 rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1051 if (rc != X86EMUL_CONTINUE)
1056 memcpy(dest, mc->data + mc->pos, n);
1061 return X86EMUL_CONTINUE;
1064 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1065 struct segmented_address addr,
1072 rc = linearize(ctxt, addr, size, false, &linear);
1073 if (rc != X86EMUL_CONTINUE)
1075 return read_emulated(ctxt, ctxt->ops, linear, data, size);
1078 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1079 struct segmented_address addr,
1086 rc = linearize(ctxt, addr, size, true, &linear);
1087 if (rc != X86EMUL_CONTINUE)
1089 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1093 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1094 struct segmented_address addr,
1095 const void *orig_data, const void *data,
1101 rc = linearize(ctxt, addr, size, true, &linear);
1102 if (rc != X86EMUL_CONTINUE)
1104 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1105 size, &ctxt->exception);
1108 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1109 struct x86_emulate_ops *ops,
1110 unsigned int size, unsigned short port,
1113 struct read_cache *rc = &ctxt->decode.io_read;
1115 if (rc->pos == rc->end) { /* refill pio read ahead */
1116 struct decode_cache *c = &ctxt->decode;
1117 unsigned int in_page, n;
1118 unsigned int count = c->rep_prefix ?
1119 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
1120 in_page = (ctxt->eflags & EFLG_DF) ?
1121 offset_in_page(c->regs[VCPU_REGS_RDI]) :
1122 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
1123 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1127 rc->pos = rc->end = 0;
1128 if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1133 memcpy(dest, rc->data + rc->pos, size);
1138 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1139 struct x86_emulate_ops *ops,
1140 u16 selector, struct desc_ptr *dt)
1142 if (selector & 1 << 2) {
1143 struct desc_struct desc;
1144 memset(dt, 0, sizeof *dt);
1145 if (!ops->get_cached_descriptor(ctxt, &desc, NULL,
1149 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1150 dt->address = get_desc_base(&desc);
1152 ops->get_gdt(ctxt, dt);
1155 /* allowed just for 8-byte segment descriptors */
1156 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1157 struct x86_emulate_ops *ops,
1158 u16 selector, struct desc_struct *desc)
1161 u16 index = selector >> 3;
1165 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1167 if (dt.size < index * 8 + 7)
1168 return emulate_gp(ctxt, selector & 0xfffc);
1169 addr = dt.address + index * 8;
1170 ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
1175 /* allowed just for 8-byte segment descriptors */
1176 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1177 struct x86_emulate_ops *ops,
1178 u16 selector, struct desc_struct *desc)
1181 u16 index = selector >> 3;
1185 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1187 if (dt.size < index * 8 + 7)
1188 return emulate_gp(ctxt, selector & 0xfffc);
1190 addr = dt.address + index * 8;
1191 ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
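/*
 * Example: selector 0x002b -> index == 0x2b >> 3 == 5 and table bit
 * (1 << 2) clear, so entry 5 of the GDT is used; the 8-byte
 * descriptor lives at dt.address + 5 * 8, after checking
 * dt.size >= 5 * 8 + 7.
 */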
1196 /* Does not support long mode */
1197 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1198 struct x86_emulate_ops *ops,
1199 u16 selector, int seg)
1201 struct desc_struct seg_desc;
1203 unsigned err_vec = GP_VECTOR;
1205 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1208 memset(&seg_desc, 0, sizeof seg_desc);
1210 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1211 || ctxt->mode == X86EMUL_MODE_REAL) {
1212 /* set real mode segment descriptor */
1213 set_desc_base(&seg_desc, selector << 4);
1214 set_desc_limit(&seg_desc, 0xffff);
1221 /* NULL selector is not valid for TR, CS and SS */
1222 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1226 /* TR should be in GDT only */
1227 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1230 if (null_selector) /* for NULL selector skip all following checks */
1233 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
1234 if (ret != X86EMUL_CONTINUE)
1237 err_code = selector & 0xfffc;
1238 err_vec = GP_VECTOR;
1240 /* can't load system descriptor into segment selector */
1241 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1245 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1251 cpl = ops->cpl(ctxt);
1256 * segment is not a writable data segment or segment
1257 * selector's RPL != CPL or DPL != CPL
1259 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1263 if (!(seg_desc.type & 8))
1266 if (seg_desc.type & 4) {
1272 if (rpl > cpl || dpl != cpl)
1275 /* CS(RPL) <- CPL */
1276 selector = (selector & 0xfffc) | cpl;
1279 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1282 case VCPU_SREG_LDTR:
1283 if (seg_desc.s || seg_desc.type != 2)
1286 default: /* DS, ES, FS, or GS */
1288 * segment is not a data or readable code segment or
1289 * ((segment is a data or nonconforming code segment)
1290 * and (both RPL and CPL > DPL))
1292 if ((seg_desc.type & 0xa) == 0x8 ||
1293 (((seg_desc.type & 0xc) != 0xc) &&
1294 (rpl > dpl && cpl > dpl)))
1300 /* mark segment as accessed */
1302 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1303 if (ret != X86EMUL_CONTINUE)
1307 ops->set_segment_selector(ctxt, selector, seg);
1308 ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
1309 return X86EMUL_CONTINUE;
1311 emulate_exception(ctxt, err_vec, err_code, true);
1312 return X86EMUL_PROPAGATE_FAULT;
1315 static void write_register_operand(struct operand *op)
1317 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1318 switch (op->bytes) {
1320 *(u8 *)op->addr.reg = (u8)op->val;
1323 *(u16 *)op->addr.reg = (u16)op->val;
1326 *op->addr.reg = (u32)op->val;
1327 break; /* 64b: zero-extend */
1329 *op->addr.reg = op->val;
1334 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1335 struct x86_emulate_ops *ops)
1338 struct decode_cache *c = &ctxt->decode;
1340 switch (c->dst.type) {
1342 write_register_operand(&c->dst);
1346 rc = segmented_cmpxchg(ctxt,
1352 rc = segmented_write(ctxt,
1356 if (rc != X86EMUL_CONTINUE)
1360 write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
1368 return X86EMUL_CONTINUE;
1371 static int em_push(struct x86_emulate_ctxt *ctxt)
1373 struct decode_cache *c = &ctxt->decode;
1374 struct segmented_address addr;
1376 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1377 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1378 addr.seg = VCPU_SREG_SS;
1380 /* Disable writeback. */
1381 c->dst.type = OP_NONE;
1382 return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
1385 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1386 struct x86_emulate_ops *ops,
1387 void *dest, int len)
1389 struct decode_cache *c = &ctxt->decode;
1391 struct segmented_address addr;
1393 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1394 addr.seg = VCPU_SREG_SS;
1395 rc = segmented_read(ctxt, addr, dest, len);
1396 if (rc != X86EMUL_CONTINUE)
1399 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1403 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1404 struct x86_emulate_ops *ops,
1405 void *dest, int len)
1408 unsigned long val, change_mask;
1409 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1410 int cpl = ops->cpl(ctxt);
1412 rc = emulate_pop(ctxt, ops, &val, len);
1413 if (rc != X86EMUL_CONTINUE)
1416 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1417 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1419 switch(ctxt->mode) {
1420 case X86EMUL_MODE_PROT64:
1421 case X86EMUL_MODE_PROT32:
1422 case X86EMUL_MODE_PROT16:
1424 change_mask |= EFLG_IOPL;
1426 change_mask |= EFLG_IF;
1428 case X86EMUL_MODE_VM86:
1430 return emulate_gp(ctxt, 0);
1431 change_mask |= EFLG_IF;
1433 default: /* real mode */
1434 change_mask |= (EFLG_IOPL | EFLG_IF);
1438 *(unsigned long *)dest =
1439 (ctxt->eflags & ~change_mask) | (val & change_mask);
1444 static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1445 struct x86_emulate_ops *ops, int seg)
1447 struct decode_cache *c = &ctxt->decode;
1449 c->src.val = ops->get_segment_selector(ctxt, seg);
1451 return em_push(ctxt);
1454 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1455 struct x86_emulate_ops *ops, int seg)
1457 struct decode_cache *c = &ctxt->decode;
1458 unsigned long selector;
1461 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1462 if (rc != X86EMUL_CONTINUE)
1465 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1469 static int emulate_pusha(struct x86_emulate_ctxt *ctxt)
1471 struct decode_cache *c = &ctxt->decode;
1472 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1473 int rc = X86EMUL_CONTINUE;
1474 int reg = VCPU_REGS_RAX;
1476 while (reg <= VCPU_REGS_RDI) {
1477 (reg == VCPU_REGS_RSP) ?
1478 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1481 if (rc != X86EMUL_CONTINUE)
1490 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1491 struct x86_emulate_ops *ops)
1493 struct decode_cache *c = &ctxt->decode;
1494 int rc = X86EMUL_CONTINUE;
1495 int reg = VCPU_REGS_RDI;
1497 while (reg >= VCPU_REGS_RAX) {
1498 if (reg == VCPU_REGS_RSP) {
1499 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1504 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1505 if (rc != X86EMUL_CONTINUE)
1512 int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1513 struct x86_emulate_ops *ops, int irq)
1515 struct decode_cache *c = &ctxt->decode;
1522 /* TODO: Add limit checks */
1523 c->src.val = ctxt->eflags;
1525 if (rc != X86EMUL_CONTINUE)
1528 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1530 c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
1532 if (rc != X86EMUL_CONTINUE)
1535 c->src.val = c->eip;
1537 if (rc != X86EMUL_CONTINUE)
1540 ops->get_idt(ctxt, &dt);
1542 eip_addr = dt.address + (irq << 2);
1543 cs_addr = dt.address + (irq << 2) + 2;
1545 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1546 if (rc != X86EMUL_CONTINUE)
1549 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1550 if (rc != X86EMUL_CONTINUE)
1553 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1554 if (rc != X86EMUL_CONTINUE)
1562 static int emulate_int(struct x86_emulate_ctxt *ctxt,
1563 struct x86_emulate_ops *ops, int irq)
1565 switch(ctxt->mode) {
1566 case X86EMUL_MODE_REAL:
1567 return emulate_int_real(ctxt, ops, irq);
1568 case X86EMUL_MODE_VM86:
1569 case X86EMUL_MODE_PROT16:
1570 case X86EMUL_MODE_PROT32:
1571 case X86EMUL_MODE_PROT64:
1573 /* Protected mode interrupts are not implemented yet */
1574 return X86EMUL_UNHANDLEABLE;
1578 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1579 struct x86_emulate_ops *ops)
1581 struct decode_cache *c = &ctxt->decode;
1582 int rc = X86EMUL_CONTINUE;
1583 unsigned long temp_eip = 0;
1584 unsigned long temp_eflags = 0;
1585 unsigned long cs = 0;
1586 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1587 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1588 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1589 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1591 /* TODO: Add stack limit check */
1593 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1595 if (rc != X86EMUL_CONTINUE)
1598 if (temp_eip & ~0xffff)
1599 return emulate_gp(ctxt, 0);
1601 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1603 if (rc != X86EMUL_CONTINUE)
1606 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1608 if (rc != X86EMUL_CONTINUE)
1611 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1613 if (rc != X86EMUL_CONTINUE)
1619 if (c->op_bytes == 4)
1620 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1621 else if (c->op_bytes == 2) {
1622 ctxt->eflags &= ~0xffff;
1623 ctxt->eflags |= temp_eflags;
1626 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1627 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1632 static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1633 struct x86_emulate_ops* ops)
1635 switch(ctxt->mode) {
1636 case X86EMUL_MODE_REAL:
1637 return emulate_iret_real(ctxt, ops);
1638 case X86EMUL_MODE_VM86:
1639 case X86EMUL_MODE_PROT16:
1640 case X86EMUL_MODE_PROT32:
1641 case X86EMUL_MODE_PROT64:
1643 /* iret from protected mode is not implemented yet */
1644 return X86EMUL_UNHANDLEABLE;
1648 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1649 struct x86_emulate_ops *ops)
1651 struct decode_cache *c = &ctxt->decode;
1653 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1656 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1658 struct decode_cache *c = &ctxt->decode;
1659 switch (c->modrm_reg) {
1661 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1664 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1667 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1670 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1672 case 4: /* sal/shl */
1673 case 6: /* sal/shl */
1674 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1677 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1680 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1685 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1686 struct x86_emulate_ops *ops)
1688 struct decode_cache *c = &ctxt->decode;
1689 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1690 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1693 switch (c->modrm_reg) {
1694 case 0 ... 1: /* test */
1695 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1698 c->dst.val = ~c->dst.val;
1701 emulate_1op("neg", c->dst, ctxt->eflags);
1704 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1707 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1710 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1714 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1718 return X86EMUL_UNHANDLEABLE;
1721 return emulate_de(ctxt);
1722 return X86EMUL_CONTINUE;
1725 static int emulate_grp45(struct x86_emulate_ctxt *ctxt)
1727 struct decode_cache *c = &ctxt->decode;
1728 int rc = X86EMUL_CONTINUE;
1730 switch (c->modrm_reg) {
1732 emulate_1op("inc", c->dst, ctxt->eflags);
1735 emulate_1op("dec", c->dst, ctxt->eflags);
1737 case 2: /* call near abs */ {
1740 c->eip = c->src.val;
1741 c->src.val = old_eip;
1745 case 4: /* jmp abs */
1746 c->eip = c->src.val;
1755 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1756 struct x86_emulate_ops *ops)
1758 struct decode_cache *c = &ctxt->decode;
1759 u64 old = c->dst.orig_val64;
1761 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1762 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1763 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1764 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1765 ctxt->eflags &= ~EFLG_ZF;
1767 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1768 (u32) c->regs[VCPU_REGS_RBX];
1770 ctxt->eflags |= EFLG_ZF;
1772 return X86EMUL_CONTINUE;
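/*
 * The above implements cmpxchg8b: compare EDX:EAX with the 64-bit
 * destination; on mismatch reload EDX:EAX from it and clear ZF, on
 * match queue ECX:EBX as the new value and set ZF (the memory write
 * itself is done later by writeback()).
 */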
1775 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1776 struct x86_emulate_ops *ops)
1778 struct decode_cache *c = &ctxt->decode;
1782 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1783 if (rc != X86EMUL_CONTINUE)
1785 if (c->op_bytes == 4)
1786 c->eip = (u32)c->eip;
1787 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1788 if (rc != X86EMUL_CONTINUE)
1790 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1794 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1795 struct x86_emulate_ops *ops, int seg)
1797 struct decode_cache *c = &ctxt->decode;
1801 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1803 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1804 if (rc != X86EMUL_CONTINUE)
1807 c->dst.val = c->src.val;
1812 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1813 struct x86_emulate_ops *ops, struct desc_struct *cs,
1814 struct desc_struct *ss)
1816 memset(cs, 0, sizeof(struct desc_struct));
1817 ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS);
1818 memset(ss, 0, sizeof(struct desc_struct));
1820 cs->l = 0; /* will be adjusted later */
1821 set_desc_base(cs, 0); /* flat segment */
1822 cs->g = 1; /* 4kb granularity */
1823 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1824 cs->type = 0x0b; /* Read, Execute, Accessed */
1826 cs->dpl = 0; /* will be adjusted later */
1830 set_desc_base(ss, 0); /* flat segment */
1831 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1832 ss->g = 1; /* 4kb granularity */
1834 ss->type = 0x03; /* Read/Write, Accessed */
1835 ss->d = 1; /* 32bit stack segment */
1841 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1843 struct decode_cache *c = &ctxt->decode;
1844 struct desc_struct cs, ss;
1849 /* syscall is not available in real mode */
1850 if (ctxt->mode == X86EMUL_MODE_REAL ||
1851 ctxt->mode == X86EMUL_MODE_VM86)
1852 return emulate_ud(ctxt);
1854 ops->get_msr(ctxt, MSR_EFER, &efer);
1855 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1857 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1859 cs_sel = (u16)(msr_data & 0xfffc);
1860 ss_sel = (u16)(msr_data + 8);
1862 if (efer & EFER_LMA) {
1866 ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
1867 ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
1868 ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
1869 ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
1871 c->regs[VCPU_REGS_RCX] = c->eip;
1872 if (efer & EFER_LMA) {
1873 #ifdef CONFIG_X86_64
1874 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1877 ctxt->mode == X86EMUL_MODE_PROT64 ?
1878 MSR_LSTAR : MSR_CSTAR, &msr_data);
1881 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1882 ctxt->eflags &= ~(msr_data | EFLG_RF);
1886 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1887 c->eip = (u32)msr_data;
1889 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1892 return X86EMUL_CONTINUE;
1896 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1898 struct decode_cache *c = &ctxt->decode;
1899 struct desc_struct cs, ss;
1904 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1905 /* inject #GP if in real mode */
1906 if (ctxt->mode == X86EMUL_MODE_REAL)
1907 return emulate_gp(ctxt, 0);
1909 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1910 * Therefore, we inject an #UD.
1912 if (ctxt->mode == X86EMUL_MODE_PROT64)
1913 return emulate_ud(ctxt);
1915 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1917 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1918 switch (ctxt->mode) {
1919 case X86EMUL_MODE_PROT32:
1920 if ((msr_data & 0xfffc) == 0x0)
1921 return emulate_gp(ctxt, 0);
1923 case X86EMUL_MODE_PROT64:
1924 if (msr_data == 0x0)
1925 return emulate_gp(ctxt, 0);
1929 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1930 cs_sel = (u16)msr_data;
1931 cs_sel &= ~SELECTOR_RPL_MASK;
1932 ss_sel = cs_sel + 8;
1933 ss_sel &= ~SELECTOR_RPL_MASK;
1934 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1939 ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
1940 ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
1941 ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
1942 ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
1944 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1947 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1948 c->regs[VCPU_REGS_RSP] = msr_data;
1950 return X86EMUL_CONTINUE;
1954 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1956 struct decode_cache *c = &ctxt->decode;
1957 struct desc_struct cs, ss;
1962 /* inject #GP if in real mode or Virtual 8086 mode */
1963 if (ctxt->mode == X86EMUL_MODE_REAL ||
1964 ctxt->mode == X86EMUL_MODE_VM86)
1965 return emulate_gp(ctxt, 0);
1967 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1969 if ((c->rex_prefix & 0x8) != 0x0)
1970 usermode = X86EMUL_MODE_PROT64;
1972 usermode = X86EMUL_MODE_PROT32;
1976 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1978 case X86EMUL_MODE_PROT32:
1979 cs_sel = (u16)(msr_data + 16);
1980 if ((msr_data & 0xfffc) == 0x0)
1981 return emulate_gp(ctxt, 0);
1982 ss_sel = (u16)(msr_data + 24);
1984 case X86EMUL_MODE_PROT64:
1985 cs_sel = (u16)(msr_data + 32);
1986 if (msr_data == 0x0)
1987 return emulate_gp(ctxt, 0);
1988 ss_sel = cs_sel + 8;
1993 cs_sel |= SELECTOR_RPL_MASK;
1994 ss_sel |= SELECTOR_RPL_MASK;
1996 ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
1997 ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
1998 ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
1999 ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
2001 c->eip = c->regs[VCPU_REGS_RDX];
2002 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
2004 return X86EMUL_CONTINUE;
2007 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
2008 struct x86_emulate_ops *ops)
2011 if (ctxt->mode == X86EMUL_MODE_REAL)
2013 if (ctxt->mode == X86EMUL_MODE_VM86)
2015 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2016 return ops->cpl(ctxt) > iopl;
2019 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2020 struct x86_emulate_ops *ops,
2023 struct desc_struct tr_seg;
2026 u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
2027 unsigned mask = (1 << len) - 1;
2030 ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR);
2033 if (desc_limit_scaled(&tr_seg) < 103)
2035 base = get_desc_base(&tr_seg);
2036 #ifdef CONFIG_X86_64
2037 base |= ((u64)base3) << 32;
2039 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2040 if (r != X86EMUL_CONTINUE)
2042 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2044 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2045 if (r != X86EMUL_CONTINUE)
2047 if ((perm >> bit_idx) & mask)
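/*
 * Example: port 0x3f9, len 1 -> the permission word is read from
 * base + io_bitmap_ptr + 0x3f9/8; bit_idx == 0x3f9 & 7 == 1 and
 * mask == 1, so access is denied iff bit 1 of that word is set.
 */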
2052 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2053 struct x86_emulate_ops *ops,
2059 if (emulator_bad_iopl(ctxt, ops))
2060 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
2063 ctxt->perm_ok = true;
2068 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2069 struct x86_emulate_ops *ops,
2070 struct tss_segment_16 *tss)
2072 struct decode_cache *c = &ctxt->decode;
2075 tss->flag = ctxt->eflags;
2076 tss->ax = c->regs[VCPU_REGS_RAX];
2077 tss->cx = c->regs[VCPU_REGS_RCX];
2078 tss->dx = c->regs[VCPU_REGS_RDX];
2079 tss->bx = c->regs[VCPU_REGS_RBX];
2080 tss->sp = c->regs[VCPU_REGS_RSP];
2081 tss->bp = c->regs[VCPU_REGS_RBP];
2082 tss->si = c->regs[VCPU_REGS_RSI];
2083 tss->di = c->regs[VCPU_REGS_RDI];
2085 tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
2086 tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
2087 tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
2088 tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
2089 tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
2092 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2093 struct x86_emulate_ops *ops,
2094 struct tss_segment_16 *tss)
2096 struct decode_cache *c = &ctxt->decode;
2100 ctxt->eflags = tss->flag | 2;
2101 c->regs[VCPU_REGS_RAX] = tss->ax;
2102 c->regs[VCPU_REGS_RCX] = tss->cx;
2103 c->regs[VCPU_REGS_RDX] = tss->dx;
2104 c->regs[VCPU_REGS_RBX] = tss->bx;
2105 c->regs[VCPU_REGS_RSP] = tss->sp;
2106 c->regs[VCPU_REGS_RBP] = tss->bp;
2107 c->regs[VCPU_REGS_RSI] = tss->si;
2108 c->regs[VCPU_REGS_RDI] = tss->di;
2111 * SDM says that segment selectors are loaded before segment
2114 ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2115 ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2116 ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2117 ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2118 ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2121 * Now load segment descriptors. If a fault happens at this stage,
2122 * it is handled in the context of the new task.
2124 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
2125 if (ret != X86EMUL_CONTINUE)
2127 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2128 if (ret != X86EMUL_CONTINUE)
2130 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2131 if (ret != X86EMUL_CONTINUE)
2133 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2134 if (ret != X86EMUL_CONTINUE)
2136 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2137 if (ret != X86EMUL_CONTINUE)
2140 return X86EMUL_CONTINUE;
2143 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2144 struct x86_emulate_ops *ops,
2145 u16 tss_selector, u16 old_tss_sel,
2146 ulong old_tss_base, struct desc_struct *new_desc)
2148 struct tss_segment_16 tss_seg;
2150 u32 new_tss_base = get_desc_base(new_desc);
2152 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2154 if (ret != X86EMUL_CONTINUE)
2155 /* FIXME: need to provide precise fault address */
2158 save_state_to_tss16(ctxt, ops, &tss_seg);
2160 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2162 if (ret != X86EMUL_CONTINUE)
2163 /* FIXME: need to provide precise fault address */
2166 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2168 if (ret != X86EMUL_CONTINUE)
2169 /* FIXME: need to provide precise fault address */
2172 if (old_tss_sel != 0xffff) {
2173 tss_seg.prev_task_link = old_tss_sel;
2175 ret = ops->write_std(ctxt, new_tss_base,
2176 &tss_seg.prev_task_link,
2177 sizeof tss_seg.prev_task_link,
2179 if (ret != X86EMUL_CONTINUE)
2180 /* FIXME: need to provide precise fault address */
2184 return load_state_from_tss16(ctxt, ops, &tss_seg);
2187 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2188 struct x86_emulate_ops *ops,
2189 struct tss_segment_32 *tss)
2191 struct decode_cache *c = &ctxt->decode;
2193 tss->cr3 = ops->get_cr(ctxt, 3);
2195 tss->eflags = ctxt->eflags;
2196 tss->eax = c->regs[VCPU_REGS_RAX];
2197 tss->ecx = c->regs[VCPU_REGS_RCX];
2198 tss->edx = c->regs[VCPU_REGS_RDX];
2199 tss->ebx = c->regs[VCPU_REGS_RBX];
2200 tss->esp = c->regs[VCPU_REGS_RSP];
2201 tss->ebp = c->regs[VCPU_REGS_RBP];
2202 tss->esi = c->regs[VCPU_REGS_RSI];
2203 tss->edi = c->regs[VCPU_REGS_RDI];
2205 tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
2206 tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
2207 tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
2208 tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
2209 tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS);
2210 tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS);
2211 tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
2214 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2215 struct x86_emulate_ops *ops,
2216 struct tss_segment_32 *tss)
2218 struct decode_cache *c = &ctxt->decode;
2221 if (ops->set_cr(ctxt, 3, tss->cr3))
2222 return emulate_gp(ctxt, 0);
2224 ctxt->eflags = tss->eflags | 2;
2225 c->regs[VCPU_REGS_RAX] = tss->eax;
2226 c->regs[VCPU_REGS_RCX] = tss->ecx;
2227 c->regs[VCPU_REGS_RDX] = tss->edx;
2228 c->regs[VCPU_REGS_RBX] = tss->ebx;
2229 c->regs[VCPU_REGS_RSP] = tss->esp;
2230 c->regs[VCPU_REGS_RBP] = tss->ebp;
2231 c->regs[VCPU_REGS_RSI] = tss->esi;
2232 c->regs[VCPU_REGS_RDI] = tss->edi;
2235 * SDM says that segment selectors are loaded before segment
2238 ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2239 ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2240 ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2241 ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2242 ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2243 ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2244 ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2247 * Now load segment descriptors. If a fault happens at this stage,
2248 * it is handled in the context of the new task.
2250 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2251 if (ret != X86EMUL_CONTINUE)
2253 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2254 if (ret != X86EMUL_CONTINUE)
2256 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2257 if (ret != X86EMUL_CONTINUE)
2259 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2260 if (ret != X86EMUL_CONTINUE)
2262 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2263 if (ret != X86EMUL_CONTINUE)
2265 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2266 if (ret != X86EMUL_CONTINUE)
2268 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2269 if (ret != X86EMUL_CONTINUE)
2272 return X86EMUL_CONTINUE;
2275 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2276 struct x86_emulate_ops *ops,
2277 u16 tss_selector, u16 old_tss_sel,
2278 ulong old_tss_base, struct desc_struct *new_desc)
2280 struct tss_segment_32 tss_seg;
2282 u32 new_tss_base = get_desc_base(new_desc);
2284 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2286 if (ret != X86EMUL_CONTINUE)
2287 /* FIXME: need to provide precise fault address */
2290 save_state_to_tss32(ctxt, ops, &tss_seg);
2292 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2294 if (ret != X86EMUL_CONTINUE)
2295 /* FIXME: need to provide precise fault address */
2298 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2300 if (ret != X86EMUL_CONTINUE)
2301 /* FIXME: need to provide precise fault address */
2304 if (old_tss_sel != 0xffff) {
2305 tss_seg.prev_task_link = old_tss_sel;
2307 ret = ops->write_std(ctxt, new_tss_base,
2308 &tss_seg.prev_task_link,
2309 sizeof tss_seg.prev_task_link,
2311 if (ret != X86EMUL_CONTINUE)
2312 /* FIXME: need to provide precise fault address */
2316 return load_state_from_tss32(ctxt, ops, &tss_seg);
2319 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2320 struct x86_emulate_ops *ops,
2321 u16 tss_selector, int reason,
2322 bool has_error_code, u32 error_code)
2324 struct desc_struct curr_tss_desc, next_tss_desc;
2326 u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR);
2327 ulong old_tss_base =
2328 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2331 /* FIXME: old_tss_base == ~0 ? */
2333 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2334 if (ret != X86EMUL_CONTINUE)
2336 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2337 if (ret != X86EMUL_CONTINUE)
2340 /* FIXME: check that next_tss_desc is tss */
2342 if (reason != TASK_SWITCH_IRET) {
2343 if ((tss_selector & 3) > next_tss_desc.dpl ||
2344 ops->cpl(ctxt) > next_tss_desc.dpl)
2345 return emulate_gp(ctxt, 0);
2348 desc_limit = desc_limit_scaled(&next_tss_desc);
2349 if (!next_tss_desc.p ||
2350 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2351 desc_limit < 0x2b)) {
2352 emulate_ts(ctxt, tss_selector & 0xfffc);
2353 return X86EMUL_PROPAGATE_FAULT;
2356 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2357 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2358 write_segment_descriptor(ctxt, ops, old_tss_sel,
2362 if (reason == TASK_SWITCH_IRET)
2363 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2365 /* set back link to prev task only if NT bit is set in eflags;
2366 note that old_tss_sel is not used after this point */
2367 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2368 old_tss_sel = 0xffff;
2370 if (next_tss_desc.type & 8)
2371 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2372 old_tss_base, &next_tss_desc);
2374 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2375 old_tss_base, &next_tss_desc);
2376 if (ret != X86EMUL_CONTINUE)
2379 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2380 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2382 if (reason != TASK_SWITCH_IRET) {
2383 next_tss_desc.type |= (1 << 1); /* set busy flag */
2384 write_segment_descriptor(ctxt, ops, tss_selector,
2388 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2389 ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
2390 ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
2392 if (has_error_code) {
2393 struct decode_cache *c = &ctxt->decode;
2395 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2397 c->src.val = (unsigned long) error_code;
2398 ret = em_push(ctxt);
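/*
 * External entry point for task switches: c->eip is only committed back
 * to ctxt->eip when the switch completed without a fault.
 */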
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = c->eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}
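/*
 * DAS: decimal-adjust AL after subtraction. Subtract 6 if the low
 * nibble overflowed a BCD digit (or AF was set), and 0x60 if the whole
 * byte overflowed (or CF was set); PF/ZF/SF are then derived by OR-ing
 * the result with zero, and CF/AF are rewritten explicitly.
 */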
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	return em_push(ctxt);
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}
static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}
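/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into rDX.
 * ~((val >> (bits - 1)) - 1) is 0 when the sign bit is clear and
 * all-ones when it is set.
 */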
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src.val;
	return X86EMUL_CONTINUE;
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
	return X86EMUL_CONTINUE;
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	ulong linear;

	rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	if (!valid_cr(c->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
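/*
 * Pre-write checks for mov-to-CR: reject unknown control registers,
 * writes that set architecture-reserved bits, and the CR0/CR3/CR4
 * combinations the SDM defines as #GP (e.g. PG without PE, or enabling
 * long-mode paging without PAE).
 */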
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int cr = c->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int dr = c->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int dr = c->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if ((cr4 & X86_CR4_TSD) && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.bytes = min(c->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.bytes = min(c->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
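/*
 * Shorthand constructors for decode-table entries: D() carries flags
 * only, I() adds an execute callback, DI/II attach an intercept id, and
 * the *IP forms also attach a permission-check hook. The 2bv/6ALU
 * helpers expand to the byte/word-operand (and, for D6ALU, the three
 * classic ALU addressing-form) variants in encoding order.
 */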
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM),			\
		D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock),		\
		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
	DI(SrcNone | ModRM | Prot | VendorSpecific, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot, sldt),
	DI(ModRM | Prot, str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	DI(ModRM | SrcMem | Priv, lgdt), DI(ModRM | SrcMem | Priv, lidt),
	DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
	DI(SrcMem16 | ModRM | Mov | Priv, lmsw),
	DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
}, {
	D(SrcNone | ModRM | Priv | VendorSpecific), EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
	DI(SrcMem16 | ModRM | Mov | Priv, lmsw), EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};
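/*
 * One-byte opcode dispatch table, indexed by the opcode itself.
 * N marks undefined/unemulated entries; group entries are resolved
 * further via the ModRM reg (and sometimes r/m) field.
 */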
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D6ALU(Lock), N, N,
	/* 0x28 - 0x2F */
	D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D6ALU(Lock), N, N,
	/* 0x38 - 0x3F */
	D6ALU(0), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	DI(ImplicitOps | Stack, pushf), DI(ImplicitOps | Stack, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	D2bv(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	D2bv(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), DI(ImplicitOps, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(D(SrcImmByte)), N,
	D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcNone | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	DI(ImplicitOps | Priv, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	DI(ImplicitOps | Priv, rdmsr),
	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
	D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef D6ALU
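/*
 * Immediate helpers: imm_size() clamps 64-bit operands to the 4-byte
 * immediate actually encoded, and decode_imm() fetches the value,
 * sign-extending by default or masking when zero-extension is wanted.
 */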
static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
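/*
 * x86_decode_insn() makes a single pass over the instruction bytes:
 * legacy prefixes, optional REX, one- or two-byte opcode, group and
 * mandatory-prefix redirection, then ModRM/SIB and the operands the
 * table flags request, leaving a fully populated decode cache.
 */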
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	c->eip = ctxt->eip;
	c->fetch.start = c->eip;
	c->fetch.end = c->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(c->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;
	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = c->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;
	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];

		if (opcode.flags & RMExt) {
			goffset = c->modrm & 7;
			opcode = opcode.u.group[goffset];
		}

		c->d |= opcode.flags;
	}

	if (c->d & Prefix) {
		if (c->rep_prefix && op_prefix)
			return X86EMUL_UNHANDLEABLE;
		simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
		switch (simd_prefix) {
		case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
		case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
		case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
		case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
		}
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;
	c->check_perm = opcode.check_perm;
	c->intercept = opcode.intercept;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined))
		return -1;

	if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	if (c->d & Sse)
		c->op_bytes = 16;
	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem.ea += c->eip;
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(ctxt, &c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RSI]);
		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem.ea = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(ctxt, &c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem.ea = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RDI]);
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
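/*
 * Second termination test for REP-prefixed CMPS/SCAS: besides the
 * RCX == 0 test done in the main loop, REPE stops when ZF clears and
 * REPNE stops when ZF becomes set.
 */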
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, test the corresponding
	 * termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((c->b == 0xa6) || (c->b == 0xa7) ||
	     (c->b == 0xae) || (c->b == 0xaf))
	    && (((c->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((c->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
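/*
 * Main execution path: run privilege/permission/intercept checks, read
 * the memory operands, dispatch to an execute callback or to the big
 * opcode switches below, then write back results and advance RIP, with
 * REP string instructions restarting or re-entering the guest as needed.
 */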
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */
	struct desc_ptr desc_ptr;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse)
	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}
	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (c->check_perm) {
		rc = c->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}
	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = segmented_read(ctxt, c->src.addr.mem,
				    c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, c->src2.addr.mem,
				    &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, c->dst.addr.mem,
				    &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
special_insn:

	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		c->dst.type = OP_NONE; /* Disable writeback. */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0x60:	/* pusha */
		rc = emulate_pusha(ctxt);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->src.addr.mem.ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		rc = em_push(ctxt);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0xa6 ... 0xa7:	/* cmps */
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xae ... 0xaf:	/* scas */
		goto cmp;
	case 0xc0 ... 0xc1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:	/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		rc = em_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
				      &c->src.val, 1);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt);
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = c->eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &desc_ptr.size, &desc_ptr.address,
					     c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			ctxt->ops->set_gdt(ctxt, &desc_ptr);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &desc_ptr.size,
						     &desc_ptr.address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				ctxt->ops->set_idt(ctxt, &desc_ptr);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(ctxt, 0);
			break;
		case 6: /* lmsw */
			ops->set_cr(ctxt, 0, (ops->get_cr(ctxt, 0) & ~0x0eul) |
				    (c->src.val & 0x0f));
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		case 7: /* invlpg */
			rc = em_invlpg(ctxt);
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		break;
	case 0x06:	/* clts */
		rc = em_clts(ctxt);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U)) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}