/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (1<<16)	/* Instruction varies with 66/f2/f3 prefix */
#define Sse         (1<<17)	/* SSE Vector instruction */
#define RMExt       (1<<18)	/* Opcode extension in ModRM r/m if mod == 3 */
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22)	/* Vendor specific instruction */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
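
/*
 * These repetition macros stamp out runs of identical table entries: for
 * example, X8(op) expands to eight comma-separated copies of "op", which
 * keeps rows of identically-decoded opcodes in the opcode tables compact.
 */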
struct opcode {
        u32 flags;
        u8 intercept;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                struct opcode *group;
                struct group_dual *gdual;
                struct gprefix *gprefix;
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
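
/*
 * Reserved EFLAGS bits: everything in EFLG_RESERVED_ZEROS_MASK must read
 * as zero except bit 1, which the architecture fixes to 1 -- hence the
 * separate EFLG_RESERVED_ONE_MASK that is OR'ed back in after clearing.
 */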
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
        /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
        "movl %"_sav",%"_LO32 _tmp"; " \
        "push %"_tmp"; " \
        "push %"_tmp"; " \
        "movl %"_msk",%"_LO32 _tmp"; " \
        "andl %"_LO32 _tmp",("_STK"); " \
        "pushf; " \
        "notl %"_LO32 _tmp"; " \
        "andl %"_LO32 _tmp",("_STK"); " \
        "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
        "pop  %"_tmp"; " \
        "orl %"_LO32 _tmp",("_STK"); " \
        "popf; " \
        "pop  %"_sav"; "
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
        /* _sav |= EFLAGS & _msk; */ \
        "pushf; " \
        "pop  %"_tmp"; " \
        "andl %"_msk",%"_LO32 _tmp"; " \
        "orl %"_LO32 _tmp",%"_sav"; "
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
        do { \
                __asm__ __volatile__ ( \
                        _PRE_EFLAGS("0", "4", "2") \
                        _op _suffix " %"_x"3,%1; " \
                        _POST_EFLAGS("0", "4", "2") \
                        : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val), \
                          "=&r" (_tmp) \
                        : _y ((_src).val), "i" (EFLAGS_MASK)); \
        } while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
        do { \
                unsigned long _tmp; \
 \
                switch ((_dst).bytes) { \
                case 2: \
                        ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
                        break; \
                case 4: \
                        ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
                        break; \
                case 8: \
                        ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
                        break; \
                } \
        } while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
        do { \
                unsigned long _tmp; \
                switch ((_dst).bytes) { \
                case 1: \
                        ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
                        break; \
                default: \
                        __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
                                             _wx, _wy, _lx, _ly, _qx, _qy); \
                        break; \
                } \
        } while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
        __emulate_2op(_op, _src, _dst, _eflags, \
                      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
        __emulate_2op(_op, _src, _dst, _eflags, \
                      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
        __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
                             "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
        do { \
                unsigned long _tmp; \
                _type _clv  = (_cl).val; \
                _type _srcv = (_src).val; \
                _type _dstv = (_dst).val; \
 \
                __asm__ __volatile__ ( \
                        _PRE_EFLAGS("0", "5", "2") \
                        _op _suffix " %4,%1 \n" \
                        _POST_EFLAGS("0", "5", "2") \
                        : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
                        : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
                        ); \
 \
                (_cl).val  = (unsigned long) _clv; \
                (_src).val = (unsigned long) _srcv; \
                (_dst).val = (unsigned long) _dstv; \
        } while (0)
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
        do { \
                switch ((_dst).bytes) { \
                case 2: \
                        __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
                                         "w", unsigned short); \
                        break; \
                case 4: \
                        __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
                                         "l", unsigned int); \
                        break; \
                case 8: \
                        ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
                                              "q", unsigned long)); \
                        break; \
                } \
        } while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix) \
        do { \
                unsigned long _tmp; \
 \
                __asm__ __volatile__ ( \
                        _PRE_EFLAGS("0", "3", "2") \
                        _op _suffix " %1; " \
                        _POST_EFLAGS("0", "3", "2") \
                        : "=m" (_eflags), "+m" ((_dst).val), \
                          "=&r" (_tmp) \
                        : "i" (EFLAGS_MASK)); \
        } while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags) \
        do { \
                switch ((_dst).bytes) { \
                case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
                case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
                case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
                case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
                } \
        } while (0)
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
        do { \
                unsigned long _tmp; \
 \
                __asm__ __volatile__ ( \
                        _PRE_EFLAGS("0", "4", "1") \
                        _op _suffix " %5; " \
                        _POST_EFLAGS("0", "4", "1") \
                        : "=m" (_eflags), "=&r" (_tmp), \
                          "+a" (_rax), "+d" (_rdx) \
                        : "i" (EFLAGS_MASK), "m" ((_src).val), \
                          "a" (_rax), "d" (_rdx)); \
        } while (0)
#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
        do { \
                unsigned long _tmp; \
 \
                __asm__ __volatile__ ( \
                        _PRE_EFLAGS("0", "5", "1") \
                        "1: \n\t" \
                        _op _suffix " %6; " \
                        "2: \n\t" \
                        _POST_EFLAGS("0", "5", "1") \
                        ".pushsection .fixup,\"ax\" \n\t" \
                        "3: movb $1, %4 \n\t" \
                        "jmp 2b \n\t" \
                        ".popsection \n\t" \
                        _ASM_EXTABLE(1b, 3b) \
                        : "=m" (_eflags), "=&r" (_tmp), \
                          "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
                        : "i" (EFLAGS_MASK), "m" ((_src).val), \
                          "a" (_rax), "d" (_rdx)); \
        } while (0)
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
        do { \
                switch((_src).bytes) { \
                case 1: \
                        __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
                                              _eflags, "b"); \
                        break; \
                case 2: \
                        __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
                                              _eflags, "w"); \
                        break; \
                case 4: \
                        __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
                                              _eflags, "l"); \
                        break; \
                case 8: \
                        ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
                                                   _eflags, "q")); \
                        break; \
                } \
        } while (0)
#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
        do { \
                switch((_src).bytes) { \
                case 1: \
                        __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
                                                 _eflags, "b", _ex); \
                        break; \
                case 2: \
                        __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
                                                 _eflags, "w", _ex); \
                        break; \
                case 4: \
                        __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
                                                 _eflags, "l", _ex); \
                        break; \
                case 8: \
                        ON64(__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
                                                      _eflags, "q", _ex)); \
                        break; \
                } \
        } while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip) \
({      unsigned long _x; \
        rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        (_eip) += (_size); \
        (_type)_x; \
})

#define insn_fetch_arr(_arr, _size, _eip) \
({      rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        (_eip) += (_size); \
})
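
/*
 * Note the convention: these are statement expressions that bail out to
 * the enclosing function's "done" label on a failed fetch, letting the
 * decode paths pull in immediates and displacements without per-call
 * error checks.
 */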
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->decode.rep_prefix,
                .modrm_mod  = ctxt->decode.modrm_mod,
                .modrm_reg  = ctxt->decode.modrm_reg,
                .modrm_rm   = ctxt->decode.modrm_rm,
                .src_val    = ctxt->decode.src.val64,
                .src_bytes  = ctxt->decode.src.bytes,
                .dst_bytes  = ctxt->decode.dst.bytes,
                .ad_bytes   = ctxt->decode.ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}
static inline unsigned long ad_mask(struct decode_cache *c)
{
        return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
        if (c->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
        return address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
        if (c->ad_bytes == sizeof(unsigned long))
                *reg += inc;
        else
                *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
        register_address_increment(c, &c->eip, rel);
}
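
/*
 * With G=1 the descriptor limit is in 4K pages; scaling shifts it left by
 * 12 and fills the low bits, e.g. a raw limit of 0xfffff becomes
 * 0xffffffff (a full 4GB segment).
 */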
static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static void set_seg_override(struct decode_cache *c, int seg)
{
        c->has_seg_override = true;
        c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
                              struct x86_emulate_ops *ops, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
                             struct x86_emulate_ops *ops,
                             struct decode_cache *c)
{
        if (!c->has_seg_override)
                return 0;

        return c->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
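
/*
 * Translate a segmented address to a linear one, enforcing segmentation:
 * real mode skips all checks, 64-bit mode only checks that the address is
 * canonical, and protected mode validates descriptor type, limits (for
 * both expand-up and expand-down segments) and privilege, raising #SS for
 * stack-segment failures and #GP otherwise.
 */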
static int __linearize(struct x86_emulate_ctxt *ctxt,
                       struct segmented_address addr,
                       unsigned size, bool write, bool fetch,
                       ulong *linear)
{
        struct decode_cache *c = &ctxt->decode;
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        unsigned cpl, rpl;

        la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
        switch (ctxt->mode) {
        case X86EMUL_MODE_REAL:
                break;
        case X86EMUL_MODE_PROT64:
                if (((signed long)la << 16) >> 16 != la)
                        return emulate_gp(ctxt, 0);
                break;
        default:
                usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL,
                                                          addr.seg);
                if (!usable)
                        goto bad;
                /* code segment or read-only data segment */
                if (((desc.type & 8) || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if ((desc.type & 8) || !(desc.type & 4)) {
                        /* expand-up segment */
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                } else {
                        /* expand-down segment */
                        if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                }
                cpl = ctxt->ops->cpl(ctxt);
                rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
                cpl = max(cpl, rpl);
                if (!(desc.type & 8)) {
                        /* data segment */
                        if (cpl > desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && !(desc.type & 4)) {
                        /* nonconforming code segment */
                        if (cpl != desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && (desc.type & 4)) {
                        /* conforming code segment */
                        if (cpl < desc.dpl)
                                goto bad;
                }
                break;
        }
        if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
                la &= (u32)-1;
        *linear = la;
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, addr.seg);
        else
                return emulate_gp(ctxt, addr.seg);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        return __linearize(ctxt, addr, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
                              struct x86_emulate_ops *ops,
                              unsigned long eip, u8 *dest)
{
        struct fetch_cache *fc = &ctxt->decode.fetch;
        int rc;
        int size, cur_size;

        if (eip == fc->end) {
                unsigned long linear;
                struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
                cur_size = fc->end - fc->start;
                size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
                rc = __linearize(ctxt, addr, size, false, true, &linear);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                rc = ops->fetch(ctxt, linear, fc->data + cur_size,
                                size, &ctxt->exception);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                fc->end += size;
        }
        *dest = fc->data[eip - fc->start];
        return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
                         struct x86_emulate_ops *ops,
                         unsigned long eip, void *dest, unsigned size)
{
        int rc;

        /* x86 instructions are limited to 15 bytes. */
        if (eip + size - ctxt->eip > 15)
                return X86EMUL_UNHANDLEABLE;
        while (size--) {
                rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
        }
        return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
                             int highbyte_regs)
{
        void *p;

        p = &regs[modrm_reg];
        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)&regs[modrm_reg & 3] + 1;
        return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct x86_emulate_ops *ops,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}
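
/*
 * Evaluate a Jcc/SETcc/CMOVcc condition code: bits 3:1 select the flag
 * expression (O, C, Z, C|Z, S, P, S^O, (S^O)|Z) and bit 0 inverts the
 * result, so e.g. condition 0x4 tests ZF ("e") while 0x5 tests !ZF ("ne").
 */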
static int test_cc(unsigned int condition, unsigned int flags)
{
        int rc = 0;

        switch ((condition & 15) >> 1) {
        case 0: /* o */
                rc |= (flags & EFLG_OF);
                break;
        case 1: /* b/c/nae */
                rc |= (flags & EFLG_CF);
                break;
        case 2: /* z/e */
                rc |= (flags & EFLG_ZF);
                break;
        case 3: /* be/na */
                rc |= (flags & (EFLG_CF|EFLG_ZF));
                break;
        case 4: /* s */
                rc |= (flags & EFLG_SF);
                break;
        case 5: /* p/pe */
                rc |= (flags & EFLG_PF);
                break;
        case 7: /* le/ng */
                rc |= (flags & EFLG_ZF);
                /* fall through */
        case 6: /* l/nge */
                rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
                break;
        }

        /* Odd condition identifiers (lsb == 1) have inverted sense. */
        return (!!rc ^ (condition & 1));
}
static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op,
                                    struct decode_cache *c,
                                    int inhibit_bytereg)
{
        unsigned reg = c->modrm_reg;
        int highbyte_regs = c->rex_prefix == 0;

        if (!(c->d & ModRM))
                reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);

        if (c->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }

        op->type = OP_REG;
        if ((c->d & ByteOp) && !inhibit_bytereg) {
                op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
                op->bytes = 1;
        } else {
                op->addr.reg = decode_register(reg, c->regs, 0);
                op->bytes = c->op_bytes;
        }
        fetch_register_operand(op);
        op->orig_val = op->val;
}
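
/*
 * Example ModRM byte 0x46 followed by disp8 0x10: mod=01, reg=000,
 * rm=110, which in 16-bit mode decodes to the memory operand [BP+0x10]
 * with an implicit SS segment; the reg field selects AX/EAX/RAX.
 */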
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct x86_emulate_ops *ops,
                        struct operand *op)
{
        struct decode_cache *c = &ctxt->decode;
        u8 sib;
        int index_reg = 0, base_reg = 0, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        if (c->rex_prefix) {
                c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
                index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
                c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
        }

        c->modrm = insn_fetch(u8, 1, c->eip);
        c->modrm_mod |= (c->modrm & 0xc0) >> 6;
        c->modrm_reg |= (c->modrm & 0x38) >> 3;
        c->modrm_rm |= (c->modrm & 0x07);
        c->modrm_seg = VCPU_SREG_DS;

        if (c->modrm_mod == 3) {
                op->type = OP_REG;
                op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
                op->addr.reg = decode_register(c->modrm_rm,
                                               c->regs, c->d & ByteOp);
                if (c->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = c->modrm_rm;
                        read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (c->ad_bytes == 2) {
                unsigned bx = c->regs[VCPU_REGS_RBX];
                unsigned bp = c->regs[VCPU_REGS_RBP];
                unsigned si = c->regs[VCPU_REGS_RSI];
                unsigned di = c->regs[VCPU_REGS_RDI];

                /* 16-bit ModR/M decode. */
                switch (c->modrm_mod) {
                case 0:
                        if (c->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, 2, c->eip);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, 1, c->eip);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, 2, c->eip);
                        break;
                }
                switch (c->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (c->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
                    (c->modrm_rm == 6 && c->modrm_mod != 0))
                        c->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((c->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, 1, c->eip);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && c->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, 4, c->eip);
                        else
                                modrm_ea += c->regs[base_reg];
                        if (index_reg != 4)
                                modrm_ea += c->regs[index_reg] << scale;
                } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                c->rip_relative = 1;
                } else
                        modrm_ea += c->regs[c->modrm_rm];
                switch (c->modrm_mod) {
                case 0:
                        if (c->modrm_rm == 5)
                                modrm_ea += insn_fetch(s32, 4, c->eip);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, 1, c->eip);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, 4, c->eip);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
done:
        return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct x86_emulate_ops *ops,
                      struct operand *op)
{
        struct decode_cache *c = &ctxt->decode;
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (c->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
                break;
        }
done:
        return rc;
}
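
/*
 * For bit ops (bt/bts/btr/btc) with a register bit index, the index may
 * lie outside the addressed word: the sign-extended byte offset derived
 * from it is folded into the memory address, leaving only the in-word
 * bit offset in src.val.
 */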
static void fetch_bit_operand(struct decode_cache *c)
{
        long sv = 0, mask;

        if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
                mask = ~(c->dst.bytes * 8 - 1);

                if (c->src.bytes == 2)
                        sv = (s16)c->src.val & (s16)mask;
                else if (c->src.bytes == 4)
                        sv = (s32)c->src.val & (s32)mask;

                c->dst.addr.mem.ea += (sv >> 3);
        }

        /* only subword offset */
        c->src.val &= (c->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         struct x86_emulate_ops *ops,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->decode.mem_read;

        while (size) {
                int n = min(size, 8u);
                size -= n;
                if (mc->pos < mc->end)
                        goto read_cached;

                rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
                                        &ctxt->exception);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                mc->end += n;

        read_cached:
                memcpy(dest, mc->data + mc->pos, n);
                mc->pos += n;
                dest += n;
                addr += n;
        }
        return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, ctxt->ops, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           struct x86_emulate_ops *ops,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->decode.io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                struct decode_cache *c = &ctxt->decode;
                unsigned int in_page, n;
                unsigned int count = c->rep_prefix ?
                        address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
                in_page = (ctxt->eflags & EFLG_DF) ?
                        offset_in_page(c->regs[VCPU_REGS_RDI]) :
                        PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
                n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
                        count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        memcpy(dest, rc->data + rc->pos, size);
        rc->pos += size;
        return 1;
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     struct x86_emulate_ops *ops,
                                     u16 selector, struct desc_ptr *dt)
{
        if (selector & 1 << 2) {
                struct desc_struct desc;
                memset(dt, 0, sizeof *dt);
                if (!ops->get_cached_descriptor(ctxt, &desc, NULL,
                                                VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc);
        } else
                ops->get_gdt(ctxt, dt);
}
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   struct x86_emulate_ops *ops,
                                   u16 selector, struct desc_struct *desc)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        int ret;
        ulong addr;

        get_descriptor_table_ptr(ctxt, ops, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);
        addr = dt.address + index * 8;
        ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

        return ret;
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    struct x86_emulate_ops *ops,
                                    u16 selector, struct desc_struct *desc)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;
        int ret;

        get_descriptor_table_ptr(ctxt, ops, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;
        ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

        return ret;
}
/* Does not support long mode */
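/*
 * Loading a selector walks the protected-mode sequence: null-selector
 * check, descriptor fetch from GDT/LDT, type/privilege checks for the
 * target segment register, presence check, then setting the accessed bit
 * before installing the descriptor.
 */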
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   struct x86_emulate_ops *ops,
                                   u16 selector, int seg)
{
        struct desc_struct seg_desc;
        u8 dpl, rpl, cpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        int ret;

        memset(&seg_desc, 0, sizeof seg_desc);

        if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
            || ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor */
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                goto load;
        }

        /* NULL selector is not valid for TR, CS and SS */
        if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
            && null_selector)
                goto exception;

        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;

        if (null_selector) /* for NULL selector skip all following checks */
                goto load;

        ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s)
                goto exception;

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        rpl = selector & 3;
        dpl = seg_desc.dpl;
        cpl = ops->cpl(ctxt);

        switch (seg) {
        case VCPU_SREG_SS:
                /*
                 * segment is not a writable data segment, or selector's
                 * RPL != CPL, or descriptor's DPL != CPL
                 */
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                if (!(seg_desc.type & 8))
                        goto exception;

                if (seg_desc.type & 4) {
                        /* conforming */
                        if (dpl > cpl)
                                goto exception;
                } else {
                        /* nonconforming */
                        if (rpl > cpl || dpl != cpl)
                                goto exception;
                }
                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and (both RPL and CPL > DPL))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl && cpl > dpl)))
                        goto exception;
                break;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                seg_desc.type |= 1;
                ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
load:
        ops->set_segment_selector(ctxt, selector, seg);
        ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
        return X86EMUL_CONTINUE;
exception:
        emulate_exception(ctxt, err_vec, err_code, true);
        return X86EMUL_PROPAGATE_FAULT;
}
static void write_register_operand(struct operand *op)
{
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
        switch (op->bytes) {
        case 1:
                *(u8 *)op->addr.reg = (u8)op->val;
                break;
        case 2:
                *(u16 *)op->addr.reg = (u16)op->val;
                break;
        case 4:
                *op->addr.reg = (u32)op->val;
                break;	/* 64b: zero-extend */
        case 8:
                *op->addr.reg = op->val;
                break;
        }
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
                            struct x86_emulate_ops *ops)
{
        int rc;
        struct decode_cache *c = &ctxt->decode;

        switch (c->dst.type) {
        case OP_REG:
                write_register_operand(&c->dst);
                break;
        case OP_MEM:
                if (c->lock_prefix)
                        rc = segmented_cmpxchg(ctxt,
                                               c->dst.addr.mem,
                                               &c->dst.orig_val,
                                               &c->dst.val,
                                               c->dst.bytes);
                else
                        rc = segmented_write(ctxt,
                                             c->dst.addr.mem,
                                             &c->dst.val,
                                             c->dst.bytes);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                break;
        case OP_XMM:
                write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
                break;
        case OP_NONE:
                /* no writeback */
                break;
        default:
                break;
        }
        return X86EMUL_CONTINUE;
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
        struct decode_cache *c = &ctxt->decode;
        struct segmented_address addr;

        register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
        addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
        addr.seg = VCPU_SREG_SS;

        /* Disable writeback. */
        c->dst.type = OP_NONE;
        return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
                       struct x86_emulate_ops *ops,
                       void *dest, int len)
{
        struct decode_cache *c = &ctxt->decode;
        int rc;
        struct segmented_address addr;

        addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
        return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        struct x86_emulate_ops *ops,
                        void *dest, int len)
{
        int rc;
        unsigned long val, change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
        int cpl = ops->cpl(ctxt);

        rc = emulate_pop(ctxt, ops, &val, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
                | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

        switch(ctxt->mode) {
        case X86EMUL_MODE_PROT64:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT16:
                if (cpl == 0)
                        change_mask |= EFLG_IOPL;
                if (cpl <= iopl)
                        change_mask |= EFLG_IF;
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3)
                        return emulate_gp(ctxt, 0);
                change_mask |= EFLG_IF;
                break;
        default: /* real mode */
                change_mask |= (EFLG_IOPL | EFLG_IF);
                break;
        }

        *(unsigned long *)dest =
                (ctxt->eflags & ~change_mask) | (val & change_mask);

        return rc;
}
static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
                             struct x86_emulate_ops *ops, int seg)
{
        struct decode_cache *c = &ctxt->decode;

        c->src.val = ops->get_segment_selector(ctxt, seg);

        return em_push(ctxt);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
                            struct x86_emulate_ops *ops, int seg)
{
        struct decode_cache *c = &ctxt->decode;
        unsigned long selector;
        int rc;

        rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
        return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt)
{
        struct decode_cache *c = &ctxt->decode;
        unsigned long old_esp = c->regs[VCPU_REGS_RSP];
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;

        while (reg <= VCPU_REGS_RDI) {
                (reg == VCPU_REGS_RSP) ?
                (c->src.val = old_esp) : (c->src.val = c->regs[reg]);

                rc = em_push(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;

                ++reg;
        }

        return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
                        struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;

        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
                        register_address_increment(c, &c->regs[VCPU_REGS_RSP],
                                                   c->op_bytes);
                        --reg;
                        continue;
                }

                rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
                if (rc != X86EMUL_CONTINUE)
                        break;
                --reg;
        }
        return rc;
}
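
/*
 * Real-mode interrupt: the IVT holds 4-byte entries, handler offset in
 * the low word and code segment in the high word, so vector "irq" is
 * fetched from IDT base + irq*4.
 */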
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
                     struct x86_emulate_ops *ops, int irq)
{
        struct decode_cache *c = &ctxt->decode;
        int rc;
        struct desc_ptr dt;
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;

        /* TODO: Add limit checks */
        c->src.val = ctxt->eflags;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

        c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        c->src.val = c->eip;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ops->get_idt(ctxt, &dt);

        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;

        rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        c->eip = eip;

        return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt,
                       struct x86_emulate_ops *ops, int irq)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return emulate_int_real(ctxt, ops, irq);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* Protected mode interrupts unimplemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
                             struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        int rc = X86EMUL_CONTINUE;
        unsigned long temp_eip = 0;
        unsigned long temp_eflags = 0;
        unsigned long cs = 0;
        unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
                             EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
                             EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
        unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

        /* TODO: Add stack limit check */

        rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (temp_eip & ~0xffff)
                return emulate_gp(ctxt, 0);

        rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        c->eip = temp_eip;

        if (c->op_bytes == 4)
                ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
        else if (c->op_bytes == 2) {
                ctxt->eflags &= ~0xffff;
                ctxt->eflags |= temp_eflags;
        }

        ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
        ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

        return rc;
}
static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops* ops)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return emulate_iret_real(ctxt, ops);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
                /* iret from protected mode unimplemented yet */
                return X86EMUL_UNHANDLEABLE;
        }
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;

        return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
        struct decode_cache *c = &ctxt->decode;
        switch (c->modrm_reg) {
        case 0:	/* rol */
                emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
                break;
        case 1:	/* ror */
                emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
                break;
        case 2:	/* rcl */
                emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
                break;
        case 3:	/* rcr */
                emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
                break;
        case 4:	/* sal/shl */
        case 6:	/* sal/shl */
                emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
                break;
        case 5:	/* shr */
                emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
                break;
        case 7:	/* sar */
                emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
                break;
        }
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        unsigned long *rax = &c->regs[VCPU_REGS_RAX];
        unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
        u8 de = 0;

        switch (c->modrm_reg) {
        case 0 ... 1:	/* test */
                emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
                break;
        case 2:	/* not */
                c->dst.val = ~c->dst.val;
                break;
        case 3:	/* neg */
                emulate_1op("neg", c->dst, ctxt->eflags);
                break;
        case 4: /* mul */
                emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
                break;
        case 5: /* imul */
                emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
                break;
        case 6: /* div */
                emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
                                       ctxt->eflags, de);
                break;
        case 7: /* idiv */
                emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
                                       ctxt->eflags, de);
                break;
        default:
                return X86EMUL_UNHANDLEABLE;
        }
        if (de)
                return emulate_de(ctxt);
        return X86EMUL_CONTINUE;
}
static int emulate_grp45(struct x86_emulate_ctxt *ctxt)
{
        struct decode_cache *c = &ctxt->decode;
        int rc = X86EMUL_CONTINUE;

        switch (c->modrm_reg) {
        case 0:	/* inc */
                emulate_1op("inc", c->dst, ctxt->eflags);
                break;
        case 1:	/* dec */
                emulate_1op("dec", c->dst, ctxt->eflags);
                break;
        case 2: /* call near abs */ {
                long int old_eip;
                old_eip = c->eip;
                c->eip = c->src.val;
                c->src.val = old_eip;
                rc = em_push(ctxt);
                break;
        }
        case 4: /* jmp abs */
                c->eip = c->src.val;
                break;
        case 6:	/* push */
                rc = em_push(ctxt);
                break;
        }
        return rc;
}
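
/*
 * Grp9 is cmpxchg8b: compare EDX:EAX with the 64-bit memory operand; on
 * mismatch, load the operand into EDX:EAX and clear ZF, otherwise store
 * ECX:EBX to the operand and set ZF.
 */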
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        u64 old = c->dst.orig_val64;

        if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
            ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
                c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
                c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
                ctxt->eflags &= ~EFLG_ZF;
        } else {
                c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
                        (u32) c->regs[VCPU_REGS_RBX];

                ctxt->eflags |= EFLG_ZF;
        }
        return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
                           struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        int rc;
        unsigned long cs;

        rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        if (c->op_bytes == 4)
                c->eip = (u32)c->eip;
        rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
        return rc;
}
static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops, int seg)
{
        struct decode_cache *c = &ctxt->decode;
        unsigned short sel;
        int rc;

        memcpy(&sel, c->src.valptr + c->op_bytes, 2);

        rc = load_segment_descriptor(ctxt, ops, sel, seg);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        c->dst.val = c->src.val;
        return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
                        struct x86_emulate_ops *ops, struct desc_struct *cs,
                        struct desc_struct *ss)
{
        memset(cs, 0, sizeof(struct desc_struct));
        ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS);
        memset(ss, 0, sizeof(struct desc_struct));

        cs->l = 0;		/* will be adjusted later */
        set_desc_base(cs, 0);	/* flat segment */
        cs->g = 1;		/* 4kb granularity */
        set_desc_limit(cs, 0xfffff);	/* 4GB limit */
        cs->type = 0x0b;	/* Read, Execute, Accessed */
        cs->s = 1;
        cs->dpl = 0;		/* will be adjusted later */
        cs->p = 1;
        cs->d = 1;

        set_desc_base(ss, 0);	/* flat segment */
        set_desc_limit(ss, 0xfffff);	/* 4GB limit */
        ss->g = 1;		/* 4kb granularity */
        ss->s = 1;
        ss->type = 0x03;	/* Read/Write, Accessed */
        ss->d = 1;		/* 32bit stack segment */
        ss->dpl = 0;
        ss->p = 1;
}
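
/*
 * SYSCALL takes its CS selector from MSR_STAR bits 47:32 (SS is CS + 8)
 * and, in legacy mode, its target EIP from STAR bits 31:0; in long mode
 * the target RIP comes from MSR_LSTAR (MSR_CSTAR for 32-bit callers) and
 * the RFLAGS bits set in MSR_SYSCALL_MASK are cleared.
 */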
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        struct desc_struct cs, ss;
        u64 msr_data;
        u16 cs_sel, ss_sel;
        u64 efer = 0;

        /* syscall is not available in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL ||
            ctxt->mode == X86EMUL_MODE_VM86)
                return emulate_ud(ctxt);

        ops->get_msr(ctxt, MSR_EFER, &efer);
        setup_syscalls_segments(ctxt, ops, &cs, &ss);

        ops->get_msr(ctxt, MSR_STAR, &msr_data);
        msr_data >>= 32;
        cs_sel = (u16)(msr_data & 0xfffc);
        ss_sel = (u16)(msr_data + 8);

        if (efer & EFER_LMA) {
                cs.d = 0;
                cs.l = 1;
        }
        ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
        ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
        ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
        ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);

        c->regs[VCPU_REGS_RCX] = c->eip;
        if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
                c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

                ops->get_msr(ctxt,
                             ctxt->mode == X86EMUL_MODE_PROT64 ?
                             MSR_LSTAR : MSR_CSTAR, &msr_data);
                c->eip = msr_data;

                ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
                ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
        } else {
                /* legacy mode */
                ops->get_msr(ctxt, MSR_STAR, &msr_data);
                c->eip = (u32)msr_data;

                ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
        }

        return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        struct desc_struct cs, ss;
        u64 msr_data;
        u16 cs_sel, ss_sel;
        u64 efer = 0;

        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
        /* inject #GP if in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL)
                return emulate_gp(ctxt, 0);

        /* XXX sysenter/sysexit have not been tested in 64-bit mode.
         * Therefore, we inject an #UD.
         */
        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return emulate_ud(ctxt);

        setup_syscalls_segments(ctxt, ops, &cs, &ss);

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
        switch (ctxt->mode) {
        case X86EMUL_MODE_PROT32:
                if ((msr_data & 0xfffc) == 0x0)
                        return emulate_gp(ctxt, 0);
                break;
        case X86EMUL_MODE_PROT64:
                if (msr_data == 0x0)
                        return emulate_gp(ctxt, 0);
                break;
        }

        ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
        cs_sel = (u16)msr_data;
        cs_sel &= ~SELECTOR_RPL_MASK;
        ss_sel = cs_sel + 8;
        ss_sel &= ~SELECTOR_RPL_MASK;
        if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
                cs.d = 0;
                cs.l = 1;
        }

        ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
        ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
        ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
        ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
        c->eip = msr_data;

        ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
        c->regs[VCPU_REGS_RSP] = msr_data;

        return X86EMUL_CONTINUE;
}
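
/*
 * SYSEXIT derives its selectors from MSR_IA32_SYSENTER_CS: a return to
 * 32-bit user mode uses CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24,
 * while a 64-bit return (REX.W prefix) uses CS = SYSENTER_CS + 32 with
 * SS immediately above it.
 */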
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
        struct decode_cache *c = &ctxt->decode;
        struct desc_struct cs, ss;
        u64 msr_data;
        int usermode;
        u16 cs_sel, ss_sel;

        /* inject #GP if in real mode or Virtual 8086 mode */
        if (ctxt->mode == X86EMUL_MODE_REAL ||
            ctxt->mode == X86EMUL_MODE_VM86)
                return emulate_gp(ctxt, 0);

        setup_syscalls_segments(ctxt, ops, &cs, &ss);

        if ((c->rex_prefix & 0x8) != 0x0)
                usermode = X86EMUL_MODE_PROT64;
        else
                usermode = X86EMUL_MODE_PROT32;

        cs.dpl = 3;
        ss.dpl = 3;
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
        switch (usermode) {
        case X86EMUL_MODE_PROT32:
                cs_sel = (u16)(msr_data + 16);
                if ((msr_data & 0xfffc) == 0x0)
                        return emulate_gp(ctxt, 0);
                ss_sel = (u16)(msr_data + 24);
                break;
        case X86EMUL_MODE_PROT64:
                cs_sel = (u16)(msr_data + 32);
                if (msr_data == 0x0)
                        return emulate_gp(ctxt, 0);
                ss_sel = cs_sel + 8;
                cs.d = 0;
                cs.l = 1;
                break;
        }
        cs_sel |= SELECTOR_RPL_MASK;
        ss_sel |= SELECTOR_RPL_MASK;

        ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
        ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
        ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
        ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);

        c->eip = c->regs[VCPU_REGS_RDX];
        c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

        return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
                              struct x86_emulate_ops *ops)
{
        int iopl;
        if (ctxt->mode == X86EMUL_MODE_REAL)
                return false;
        if (ctxt->mode == X86EMUL_MODE_VM86)
                return true;
        iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
        return ops->cpl(ctxt) > iopl;
}
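
/*
 * If IOPL alone doesn't permit the access, protected mode consults the
 * I/O permission bitmap in the TSS: the u16 at offset 102 gives the
 * bitmap's offset, and two bytes are read so that an access straddling a
 * byte boundary is checked in one go.
 */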
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
                                            struct x86_emulate_ops *ops,
                                            u16 port, u16 len)
{
        struct desc_struct tr_seg;
        u32 base3;
        int r;
        u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
        unsigned mask = (1 << len) - 1;
        unsigned long base;

        ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR);
        if (!tr_seg.p)
                return false;
        if (desc_limit_scaled(&tr_seg) < 103)
                return false;
        base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
        base |= ((u64)base3) << 32;
#endif
        r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
        if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
                return false;
        r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
        if ((perm >> bit_idx) & mask)
                return false;
        return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
                                 struct x86_emulate_ops *ops,
                                 u16 port, u16 len)
{
        if (ctxt->perm_ok)
                return true;

        if (emulator_bad_iopl(ctxt, ops))
                if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
                        return false;

        ctxt->perm_ok = true;

        return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops,
                                struct tss_segment_16 *tss)
{
        struct decode_cache *c = &ctxt->decode;

        tss->ip = c->eip;
        tss->flag = ctxt->eflags;
        tss->ax = c->regs[VCPU_REGS_RAX];
        tss->cx = c->regs[VCPU_REGS_RCX];
        tss->dx = c->regs[VCPU_REGS_RDX];
        tss->bx = c->regs[VCPU_REGS_RBX];
        tss->sp = c->regs[VCPU_REGS_RSP];
        tss->bp = c->regs[VCPU_REGS_RBP];
        tss->si = c->regs[VCPU_REGS_RSI];
        tss->di = c->regs[VCPU_REGS_RDI];

        tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
        tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
        tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
        tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
        tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
                                 struct x86_emulate_ops *ops,
                                 struct tss_segment_16 *tss)
{
        struct decode_cache *c = &ctxt->decode;
        int ret;

        c->eip = tss->ip;
        ctxt->eflags = tss->flag | 2;
        c->regs[VCPU_REGS_RAX] = tss->ax;
        c->regs[VCPU_REGS_RCX] = tss->cx;
        c->regs[VCPU_REGS_RDX] = tss->dx;
        c->regs[VCPU_REGS_RBX] = tss->bx;
        c->regs[VCPU_REGS_RSP] = tss->sp;
        c->regs[VCPU_REGS_RBP] = tss->bp;
        c->regs[VCPU_REGS_RSI] = tss->si;
        c->regs[VCPU_REGS_RDI] = tss->di;

        /*
         * SDM says that segment selectors are loaded before segment
         * descriptors
         */
        ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
        ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
        ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
        ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
        ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

        /*
         * Now load segment descriptors. If a fault happens at this stage
         * it is handled in the context of the new task
         */
        ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                          struct x86_emulate_ops *ops,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
{
        struct tss_segment_16 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);

        ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        save_state_to_tss16(ctxt, ops, &tss_seg);

        ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                             &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;

                ret = ops->write_std(ctxt, new_tss_base,
                                     &tss_seg.prev_task_link,
                                     sizeof tss_seg.prev_task_link,
                                     &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        /* FIXME: need to provide precise fault address */
                        return ret;
        }

        return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops,
                                struct tss_segment_32 *tss)
{
        struct decode_cache *c = &ctxt->decode;

        tss->cr3 = ops->get_cr(ctxt, 3);
        tss->eip = c->eip;
        tss->eflags = ctxt->eflags;
        tss->eax = c->regs[VCPU_REGS_RAX];
        tss->ecx = c->regs[VCPU_REGS_RCX];
        tss->edx = c->regs[VCPU_REGS_RDX];
        tss->ebx = c->regs[VCPU_REGS_RBX];
        tss->esp = c->regs[VCPU_REGS_RSP];
        tss->ebp = c->regs[VCPU_REGS_RBP];
        tss->esi = c->regs[VCPU_REGS_RSI];
        tss->edi = c->regs[VCPU_REGS_RDI];

        tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
        tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
        tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
        tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
        tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS);
        tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS);
        tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
                                 struct x86_emulate_ops *ops,
                                 struct tss_segment_32 *tss)
{
        struct decode_cache *c = &ctxt->decode;
        int ret;

        if (ops->set_cr(ctxt, 3, tss->cr3))
                return emulate_gp(ctxt, 0);
        c->eip = tss->eip;
        ctxt->eflags = tss->eflags | 2;
        c->regs[VCPU_REGS_RAX] = tss->eax;
        c->regs[VCPU_REGS_RCX] = tss->ecx;
        c->regs[VCPU_REGS_RDX] = tss->edx;
        c->regs[VCPU_REGS_RBX] = tss->ebx;
        c->regs[VCPU_REGS_RSP] = tss->esp;
        c->regs[VCPU_REGS_RBP] = tss->ebp;
        c->regs[VCPU_REGS_RSI] = tss->esi;
        c->regs[VCPU_REGS_RDI] = tss->edi;

        /*
         * SDM says that segment selectors are loaded before segment
         * descriptors
         */
        ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
        ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
        ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
        ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
        ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
        ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
        ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

        /*
         * Now load segment descriptors. If a fault happens at this stage
         * it is handled in the context of the new task
         */
        ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                          struct x86_emulate_ops *ops,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
{
        struct tss_segment_32 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);

        ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        save_state_to_tss32(ctxt, ops, &tss_seg);

        ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                             &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
                /* FIXME: need to provide precise fault address */
                return ret;

        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;

                ret = ops->write_std(ctxt, new_tss_base,
                                     &tss_seg.prev_task_link,
                                     sizeof tss_seg.prev_task_link,
                                     &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        /* FIXME: need to provide precise fault address */
                        return ret;
        }

        return load_state_from_tss32(ctxt, ops, &tss_seg);
}
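
/*
 * Hardware-style task switch: validate the new TSS descriptor, save the
 * current state into the old TSS, read the new TSS, optionally link it
 * back to the old task (CALL/gate with NT), toggle the busy bits, and
 * load the new state.
 */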
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
                                   struct x86_emulate_ops *ops,
                                   u16 tss_selector, int reason,
                                   bool has_error_code, u32 error_code)
{
        struct desc_struct curr_tss_desc, next_tss_desc;
        int ret;
        u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR);
        ulong old_tss_base =
                ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
        u32 desc_limit;

        /* FIXME: old_tss_base == ~0 ? */

        ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
        if (ret != X86EMUL_CONTINUE)
                return ret;
        ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        /* FIXME: check that next_tss_desc is tss */

        if (reason != TASK_SWITCH_IRET) {
                if ((tss_selector & 3) > next_tss_desc.dpl ||
                    ops->cpl(ctxt) > next_tss_desc.dpl)
                        return emulate_gp(ctxt, 0);
        }

        desc_limit = desc_limit_scaled(&next_tss_desc);
        if (!next_tss_desc.p ||
            ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
             desc_limit < 0x2b)) {
                emulate_ts(ctxt, tss_selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }

        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
                write_segment_descriptor(ctxt, ops, old_tss_sel,
                                         &curr_tss_desc);
        }

        if (reason == TASK_SWITCH_IRET)
                ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

        /* set back link to prev task only if NT bit is set in eflags
           note that old_tss_sel is not used after this point */
        if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
                old_tss_sel = 0xffff;

        if (next_tss_desc.type & 8)
                ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
                                     old_tss_base, &next_tss_desc);
        else
                ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
                                     old_tss_base, &next_tss_desc);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
                ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

        if (reason != TASK_SWITCH_IRET) {
                next_tss_desc.type |= (1 << 1); /* set busy flag */
                write_segment_descriptor(ctxt, ops, tss_selector,
                                         &next_tss_desc);
        }

        ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
        ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
        ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);

        if (has_error_code) {
                struct decode_cache *c = &ctxt->decode;

                c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
                c->lock_prefix = 0;
                c->src.val = (unsigned long) error_code;
                ret = em_push(ctxt);
        }

        return ret;
}
2404 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2405 u16 tss_selector, int reason,
2406 bool has_error_code, u32 error_code)
2408 struct x86_emulate_ops *ops = ctxt->ops;
2409 struct decode_cache *c = &ctxt->decode;
2413 c->dst.type = OP_NONE;
2415 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2416 has_error_code, error_code);
2418 if (rc == X86EMUL_CONTINUE)
2421 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2424 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2425 int reg, struct operand *op)
2427 struct decode_cache *c = &ctxt->decode;
2428 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2430 register_address_increment(c, &c->regs[reg], df * op->bytes);
2431 op->addr.mem.ea = register_address(c, c->regs[reg]);
2432 op->addr.mem.seg = seg;
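/*
 * Example: with DF clear, a 4-byte movsd advances RSI/RDI by +4 per
 * iteration; after std (DF set) the same instruction walks the
 * buffers backwards by -4, which is exactly df * op->bytes above.
 */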
2435 static int em_das(struct x86_emulate_ctxt *ctxt)
2437 struct decode_cache *c = &ctxt->decode;
2439 bool af, cf, old_cf;
2441 cf = ctxt->eflags & X86_EFLAGS_CF;
2447 af = ctxt->eflags & X86_EFLAGS_AF;
2448 if ((al & 0x0f) > 9 || af) {
2450 cf = old_cf | (al >= 250);
2455 if (old_al > 0x99 || old_cf) {
2461 /* Set PF, ZF, SF */
2462 c->src.type = OP_IMM;
2465 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2466 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2468 ctxt->eflags |= X86_EFLAGS_CF;
2470 ctxt->eflags |= X86_EFLAGS_AF;
2471 return X86EMUL_CONTINUE;
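/*
 * Worked example of the DAS flow above (illustrative): AL = 0x9c,
 * CF = AF = 0.  The low nibble 0xc > 9, so AL -= 6 -> 0x96 and AF is
 * set; the original AL 0x9c > 0x99, so AL -= 0x60 -> 0x36 and CF is
 * set, i.e. packed-BCD result 36 with a borrow out.
 */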
2474 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2476 struct decode_cache *c = &ctxt->decode;
2481 old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS);
2484 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2485 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2486 return X86EMUL_CONTINUE;
2489 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2491 c->src.val = old_cs;
2493 if (rc != X86EMUL_CONTINUE)
2496 c->src.val = old_eip;
2497 return em_push(ctxt);
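/*
 * The push order above mirrors a hardware far call: the old CS
 * selector is pushed first, then the return offset, so a later lret
 * pops EIP and then CS.  Illustrative stack layout afterwards, with
 * 4-byte operands:  [rsp] = old EIP, [rsp+4] = old CS.
 */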
2500 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2502 struct decode_cache *c = &ctxt->decode;
2505 c->dst.type = OP_REG;
2506 c->dst.addr.reg = &c->eip;
2507 c->dst.bytes = c->op_bytes;
2508 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2509 if (rc != X86EMUL_CONTINUE)
2511 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2512 return X86EMUL_CONTINUE;
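/*
 * Sketch: "ret imm16" pops the return address into EIP, then discards
 * imm16 bytes of arguments by bumping RSP - e.g. "ret 8" after a
 * stdcall-style callee with two 4-byte arguments.
 */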
2515 static int em_imul(struct x86_emulate_ctxt *ctxt)
2517 struct decode_cache *c = &ctxt->decode;
2519 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2520 return X86EMUL_CONTINUE;
2523 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2525 struct decode_cache *c = &ctxt->decode;
2527 c->dst.val = c->src2.val;
2528 return em_imul(ctxt);
2531 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2533 struct decode_cache *c = &ctxt->decode;
2535 c->dst.type = OP_REG;
2536 c->dst.bytes = c->src.bytes;
2537 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
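/*
 * The expression below broadcasts the accumulator's sign bit: the
 * shift isolates it (0 or 1), subtracting 1 yields all-ones or zero,
 * and the complement flips that.  E.g. cwd with AX = 0x8000:
 * ~((0x8000 >> 15) - 1) = ~0 -> DX = 0xffff.
 */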
2538 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2540 return X86EMUL_CONTINUE;
2543 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2545 struct decode_cache *c = &ctxt->decode;
2548 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2549 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2550 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2551 return X86EMUL_CONTINUE;
2554 static int em_mov(struct x86_emulate_ctxt *ctxt)
2556 struct decode_cache *c = &ctxt->decode;
2557 c->dst.val = c->src.val;
2558 return X86EMUL_CONTINUE;
2561 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2563 struct decode_cache *c = &ctxt->decode;
2564 memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
2565 return X86EMUL_CONTINUE;
2568 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2570 struct decode_cache *c = &ctxt->decode;
2574 rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
2575 if (rc == X86EMUL_CONTINUE)
2576 emulate_invlpg(ctxt->vcpu, linear);
2577 /* Disable writeback. */
2578 c->dst.type = OP_NONE;
2579 return X86EMUL_CONTINUE;
2582 static bool valid_cr(int nr)
2594 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2596 struct decode_cache *c = &ctxt->decode;
2598 if (!valid_cr(c->modrm_reg))
2599 return emulate_ud(ctxt);
2601 return X86EMUL_CONTINUE;
2604 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2606 struct decode_cache *c = &ctxt->decode;
2607 u64 new_val = c->src.val64;
2608 int cr = c->modrm_reg;
2611 static u64 cr_reserved_bits[] = {
2612 0xffffffff00000000ULL,
2613 0, 0, 0, /* CR3 checked later */
2620 return emulate_ud(ctxt);
2622 if (new_val & cr_reserved_bits[cr])
2623 return emulate_gp(ctxt, 0);
2628 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2629 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2630 return emulate_gp(ctxt, 0);
2632 cr4 = ctxt->ops->get_cr(ctxt, 4);
2633 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2635 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2636 !(cr4 & X86_CR4_PAE))
2637 return emulate_gp(ctxt, 0);
2644 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2645 if (efer & EFER_LMA)
2646 rsvd = CR3_L_MODE_RESERVED_BITS;
2647 else if (is_pae(ctxt->vcpu))
2648 rsvd = CR3_PAE_RESERVED_BITS;
2649 else if (is_paging(ctxt->vcpu))
2650 rsvd = CR3_NONPAE_RESERVED_BITS;
2653 return emulate_gp(ctxt, 0);
2660 cr4 = ctxt->ops->get_cr(ctxt, 4);
2661 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2663 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2664 return emulate_gp(ctxt, 0);
2670 return X86EMUL_CONTINUE;
2673 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2677 ctxt->ops->get_dr(ctxt, 7, &dr7);
2679 /* Check if DR7.GD (general detect enable, bit 13) is set */
2680 return dr7 & (1 << 13);
2683 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2685 struct decode_cache *c = &ctxt->decode;
2686 int dr = c->modrm_reg;
2690 return emulate_ud(ctxt);
2692 cr4 = ctxt->ops->get_cr(ctxt, 4);
2693 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2694 return emulate_ud(ctxt);
2696 if (check_dr7_gd(ctxt))
2697 return emulate_db(ctxt);
2699 return X86EMUL_CONTINUE;
2702 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2704 struct decode_cache *c = &ctxt->decode;
2705 u64 new_val = c->src.val64;
2706 int dr = c->modrm_reg;
2708 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2709 return emulate_gp(ctxt, 0);
2711 return check_dr_read(ctxt);
2714 static int check_svme(struct x86_emulate_ctxt *ctxt)
2718 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2720 if (!(efer & EFER_SVME))
2721 return emulate_ud(ctxt);
2723 return X86EMUL_CONTINUE;
2726 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2728 u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];
2730 /* Valid physical address? */
2731 if (rax & 0xffff000000000000ULL)
2732 return emulate_gp(ctxt, 0);
2734 return check_svme(ctxt);
2737 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
2739 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2741 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
2742 return emulate_ud(ctxt);
2744 return X86EMUL_CONTINUE;
2747 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
2749 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2750 u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];
2752 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
2754 return emulate_gp(ctxt, 0);
2756 return X86EMUL_CONTINUE;
2759 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
2761 struct decode_cache *c = &ctxt->decode;
2763 c->dst.bytes = min(c->dst.bytes, 4u);
2764 if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
2765 return emulate_gp(ctxt, 0);
2767 return X86EMUL_CONTINUE;
2770 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
2772 struct decode_cache *c = &ctxt->decode;
2774 c->src.bytes = min(c->src.bytes, 4u);
2775 if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
2776 return emulate_gp(ctxt, 0);
2778 return X86EMUL_CONTINUE;
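/*
 * Both checks above clamp the access width to 4 bytes (the widest
 * x86 port access) before consulting emulator_io_permited(), which
 * tests the guest's I/O permission bitmap; architecturally every
 * byte of the port range needs a clear bitmap bit, else #GP(0).
 */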
2781 #define D(_y) { .flags = (_y) }
2782 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2783 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2784 .check_perm = (_p) }
2786 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2787 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2788 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2789 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2790 #define II(_f, _e, _i) \
2791 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2792 #define IIP(_f, _e, _i, _p) \
2793 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
2794 .check_perm = (_p) }
2795 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2797 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2798 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
2799 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2801 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2802 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2803 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
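/*
 * Expansion sketch: D6ALU(Lock) emits the six classic ALU encodings
 * in opcode order (e.g. add, 0x00-0x05):
 *   D(ByteOp|DstMem|SrcReg|ModRM|Lock), D(DstMem|SrcReg|ModRM|Lock),
 *   D(ByteOp|DstReg|SrcMem|ModRM),      D(DstReg|SrcMem|ModRM),
 *   D(ByteOp|DstAcc|SrcImm),            D(DstAcc|SrcImm)
 * with Lock retained only on the r/m-destination forms.
 */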
2805 static struct opcode group7_rm1[] = {
2806 DI(SrcNone | ModRM | Priv, monitor),
2807 DI(SrcNone | ModRM | Priv, mwait),
2811 static struct opcode group7_rm3[] = {
2812 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
2813 DI(SrcNone | ModRM | Prot | VendorSpecific, vmmcall),
2814 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
2815 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
2816 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
2817 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
2818 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
2819 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
2822 static struct opcode group7_rm7[] = {
2824 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
2827 static struct opcode group1[] = {
2831 static struct opcode group1A[] = {
2832 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2835 static struct opcode group3[] = {
2836 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2837 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2838 X4(D(SrcMem | ModRM)),
2841 static struct opcode group4[] = {
2842 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2846 static struct opcode group5[] = {
2847 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2848 D(SrcMem | ModRM | Stack),
2849 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2850 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2851 D(SrcMem | ModRM | Stack), N,
2854 static struct opcode group6[] = {
2855 DI(ModRM | Prot, sldt),
2856 DI(ModRM | Prot, str),
2857 DI(ModRM | Prot | Priv, lldt),
2858 DI(ModRM | Prot | Priv, ltr),
2862 static struct group_dual group7 = { {
2863 DI(ModRM | Mov | DstMem | Priv, sgdt),
2864 DI(ModRM | Mov | DstMem | Priv, sidt),
2865 DI(ModRM | SrcMem | Priv, lgdt), DI(ModRM | SrcMem | Priv, lidt),
2866 DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
2867 DI(SrcMem16 | ModRM | Mov | Priv, lmsw),
2868 DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
2870 D(SrcNone | ModRM | Priv | VendorSpecific), EXT(0, group7_rm1),
2871 N, EXT(0, group7_rm3),
2872 DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
2873 DI(SrcMem16 | ModRM | Mov | Priv, lmsw), EXT(0, group7_rm7),
2876 static struct opcode group8[] = {
2878 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2879 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2882 static struct group_dual group9 = { {
2883 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2885 N, N, N, N, N, N, N, N,
2888 static struct opcode group11[] = {
2889 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2892 static struct gprefix pfx_0f_6f_0f_7f = {
2893 N, N, N, I(Sse, em_movdqu),
2896 static struct opcode opcode_table[256] = {
2899 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2902 D(ImplicitOps | Stack | No64), N,
2905 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2908 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2912 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2920 X8(I(SrcReg | Stack, em_push)),
2922 X8(D(DstReg | Stack)),
2924 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2925 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2928 I(SrcImm | Mov | Stack, em_push),
2929 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2930 I(SrcImmByte | Mov | Stack, em_push),
2931 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2932 D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
2933 D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
2937 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2938 G(DstMem | SrcImm | ModRM | Group, group1),
2939 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2940 G(DstMem | SrcImmByte | ModRM | Group, group1),
2941 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2943 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2944 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2945 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2946 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2948 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
2950 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2951 I(SrcImmFAddr | No64, em_call_far), N,
2952 DI(ImplicitOps | Stack, pushf), DI(ImplicitOps | Stack, popf), N, N,
2954 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2955 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2956 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2957 D2bv(SrcSI | DstDI | String),
2959 D2bv(DstAcc | SrcImm),
2960 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2961 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2962 D2bv(SrcAcc | DstDI | String),
2964 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2966 X8(I(DstReg | SrcImm | Mov, em_mov)),
2968 D2bv(DstMem | SrcImmByte | ModRM),
2969 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2970 D(ImplicitOps | Stack),
2971 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2972 G(ByteOp, group11), G(0, group11),
2974 N, N, N, D(ImplicitOps | Stack),
2975 D(ImplicitOps), DI(SrcImmByte, intn),
2976 D(ImplicitOps | No64), DI(ImplicitOps, iret),
2978 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2981 N, N, N, N, N, N, N, N,
2984 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
2985 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
2987 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2988 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2989 D2bvIP(SrcNone | DstAcc, in, check_perm_in),
2990 D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
2992 N, DI(ImplicitOps, icebp), N, N,
2993 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
2994 G(ByteOp, group3), G(0, group3),
2996 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2997 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3000 static struct opcode twobyte_table[256] = {
3002 G(0, group6), GD(0, &group7), N, N,
3003 N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
3004 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3005 N, D(ImplicitOps | ModRM), N, N,
3007 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3009 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3010 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3011 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3012 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3014 N, N, N, N, N, N, N, N,
3016 DI(ImplicitOps | Priv, wrmsr),
3017 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3018 DI(ImplicitOps | Priv, rdmsr),
3019 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3020 D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
3022 N, N, N, N, N, N, N, N,
3024 X16(D(DstReg | SrcMem | ModRM | Mov)),
3026 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3031 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3036 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3040 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3042 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3043 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3044 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3045 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3047 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3048 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3049 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3050 D(DstMem | SrcReg | Src2CL | ModRM),
3051 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3053 D2bv(DstMem | SrcReg | ModRM | Lock),
3054 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3055 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
3056 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3059 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3060 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3061 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3063 D2bv(DstMem | SrcReg | ModRM | Lock),
3064 N, D(DstMem | SrcReg | ModRM | Mov),
3065 N, N, N, GD(0, &group9),
3066 N, N, N, N, N, N, N, N,
3068 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3070 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3072 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3088 static unsigned imm_size(struct decode_cache *c)
3092 size = (c->d & ByteOp) ? 1 : c->op_bytes;
3098 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3099 unsigned size, bool sign_extension)
3101 struct decode_cache *c = &ctxt->decode;
3102 struct x86_emulate_ops *ops = ctxt->ops;
3103 int rc = X86EMUL_CONTINUE;
3107 op->addr.mem.ea = c->eip;
3108 /* NB. Immediates are sign-extended as necessary. */
3109 switch (op->bytes) {
3111 op->val = insn_fetch(s8, 1, c->eip);
3114 op->val = insn_fetch(s16, 2, c->eip);
3117 op->val = insn_fetch(s32, 4, c->eip);
3120 if (!sign_extension) {
3121 switch (op->bytes) {
3129 op->val &= 0xffffffff;
3138 x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3140 struct x86_emulate_ops *ops = ctxt->ops;
3141 struct decode_cache *c = &ctxt->decode;
3142 int rc = X86EMUL_CONTINUE;
3143 int mode = ctxt->mode;
3144 int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
3145 bool op_prefix = false;
3146 struct opcode opcode, *g_mod012, *g_mod3;
3147 struct operand memop = { .type = OP_NONE };
3150 c->fetch.start = c->eip;
3151 c->fetch.end = c->fetch.start + insn_len;
3153 memcpy(c->fetch.data, insn, insn_len);
3156 case X86EMUL_MODE_REAL:
3157 case X86EMUL_MODE_VM86:
3158 case X86EMUL_MODE_PROT16:
3159 def_op_bytes = def_ad_bytes = 2;
3161 case X86EMUL_MODE_PROT32:
3162 def_op_bytes = def_ad_bytes = 4;
3164 #ifdef CONFIG_X86_64
3165 case X86EMUL_MODE_PROT64:
3174 c->op_bytes = def_op_bytes;
3175 c->ad_bytes = def_ad_bytes;
3177 /* Legacy prefixes. */
3179 switch (c->b = insn_fetch(u8, 1, c->eip)) {
3180 case 0x66: /* operand-size override */
3182 /* switch between 2/4 bytes */
3183 c->op_bytes = def_op_bytes ^ 6;
3185 case 0x67: /* address-size override */
3186 if (mode == X86EMUL_MODE_PROT64)
3187 /* switch between 4/8 bytes */
3188 c->ad_bytes = def_ad_bytes ^ 12;
3190 /* switch between 2/4 bytes */
3191 c->ad_bytes = def_ad_bytes ^ 6;
3193 case 0x26: /* ES override */
3194 case 0x2e: /* CS override */
3195 case 0x36: /* SS override */
3196 case 0x3e: /* DS override */
3197 set_seg_override(c, (c->b >> 3) & 3);
3199 case 0x64: /* FS override */
3200 case 0x65: /* GS override */
3201 set_seg_override(c, c->b & 7);
3203 case 0x40 ... 0x4f: /* REX */
3204 if (mode != X86EMUL_MODE_PROT64)
3206 c->rex_prefix = c->b;
3208 case 0xf0: /* LOCK */
3211 case 0xf2: /* REPNE/REPNZ */
3212 case 0xf3: /* REP/REPE/REPZ */
3213 c->rep_prefix = c->b;
3219 /* Any legacy prefix after a REX prefix nullifies its effect. */
3227 if (c->rex_prefix & 8)
3228 c->op_bytes = 8; /* REX.W */
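/*
 * Example: a REX prefix with the W bit (0x48-0x4f, bit 3 set) widens
 * the operand size to 8 bytes, so "48 89 c8" decodes as
 * mov rax,rcx where plain "89 c8" is the 32-bit mov eax,ecx.
 */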
3230 /* Opcode byte(s). */
3231 opcode = opcode_table[c->b];
3232 /* Two-byte opcode? */
3235 c->b = insn_fetch(u8, 1, c->eip);
3236 opcode = twobyte_table[c->b];
3238 c->d = opcode.flags;
3241 dual = c->d & GroupDual;
3242 c->modrm = insn_fetch(u8, 1, c->eip);
3245 if (c->d & GroupDual) {
3246 g_mod012 = opcode.u.gdual->mod012;
3247 g_mod3 = opcode.u.gdual->mod3;
3249 g_mod012 = g_mod3 = opcode.u.group;
3251 c->d &= ~(Group | GroupDual);
3253 goffset = (c->modrm >> 3) & 7;
3255 if ((c->modrm >> 6) == 3)
3256 opcode = g_mod3[goffset];
3258 opcode = g_mod012[goffset];
3260 if (opcode.flags & RMExt) {
3261 goffset = c->modrm & 7;
3262 opcode = opcode.u.group[goffset];
3265 c->d |= opcode.flags;
3268 if (c->d & Prefix) {
3269 if (c->rep_prefix && op_prefix)
3270 return X86EMUL_UNHANDLEABLE;
3271 simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
3272 switch (simd_prefix) {
3273 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3274 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3275 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3276 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3278 c->d |= opcode.flags;
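/*
 * Example of the mandatory-prefix dispatch above: for opcode 0f 6f,
 * pfx_0f_6f_0f_7f routes only the 0xf3 slot to em_movdqu (f3 0f 6f
 * is movdqu); the no-prefix, 0x66 and 0xf2 slots remain N, i.e.
 * undefined as far as this emulator is concerned.
 */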
3281 c->execute = opcode.u.execute;
3282 c->check_perm = opcode.check_perm;
3283 c->intercept = opcode.intercept;
3286 if (c->d == 0 || (c->d & Undefined))
3289 if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3292 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
3295 if (c->d & Op3264) {
3296 if (mode == X86EMUL_MODE_PROT64)
3305 /* ModRM and SIB bytes. */
3307 rc = decode_modrm(ctxt, ops, &memop);
3308 if (!c->has_seg_override)
3309 set_seg_override(c, c->modrm_seg);
3310 } else if (c->d & MemAbs)
3311 rc = decode_abs(ctxt, ops, &memop);
3312 if (rc != X86EMUL_CONTINUE)
3315 if (!c->has_seg_override)
3316 set_seg_override(c, VCPU_SREG_DS);
3318 memop.addr.mem.seg = seg_override(ctxt, ops, c);
3320 if (memop.type == OP_MEM && c->ad_bytes != 8)
3321 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3323 if (memop.type == OP_MEM && c->rip_relative)
3324 memop.addr.mem.ea += c->eip;
3327 * Decode and fetch the source operand: register, memory
3330 switch (c->d & SrcMask) {
3334 decode_register_operand(ctxt, &c->src, c, 0);
3343 memop.bytes = (c->d & ByteOp) ? 1 :
3349 rc = decode_imm(ctxt, &c->src, 2, false);
3352 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
3355 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
3358 rc = decode_imm(ctxt, &c->src, 1, true);
3361 rc = decode_imm(ctxt, &c->src, 1, false);
3364 c->src.type = OP_REG;
3365 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3366 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
3367 fetch_register_operand(&c->src);
3374 c->src.type = OP_MEM;
3375 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3376 c->src.addr.mem.ea =
3377 register_address(c, c->regs[VCPU_REGS_RSI]);
3378 c->src.addr.mem.seg = seg_override(ctxt, ops, c),
3382 c->src.type = OP_IMM;
3383 c->src.addr.mem.ea = c->eip;
3384 c->src.bytes = c->op_bytes + 2;
3385 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
3388 memop.bytes = c->op_bytes + 2;
3393 if (rc != X86EMUL_CONTINUE)
3397 * Decode and fetch the second source operand: register, memory
3400 switch (c->d & Src2Mask) {
3405 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
3408 rc = decode_imm(ctxt, &c->src2, 1, true);
3415 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
3419 if (rc != X86EMUL_CONTINUE)
3422 /* Decode and fetch the destination operand: register or memory. */
3423 switch (c->d & DstMask) {
3425 decode_register_operand(ctxt, &c->dst, c,
3426 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
3429 c->dst.type = OP_IMM;
3430 c->dst.addr.mem.ea = c->eip;
3432 c->dst.val = insn_fetch(u8, 1, c->eip);
3437 if ((c->d & DstMask) == DstMem64)
3440 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3442 fetch_bit_operand(c);
3443 c->dst.orig_val = c->dst.val;
3446 c->dst.type = OP_REG;
3447 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3448 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
3449 fetch_register_operand(&c->dst);
3450 c->dst.orig_val = c->dst.val;
3453 c->dst.type = OP_MEM;
3454 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3455 c->dst.addr.mem.ea =
3456 register_address(c, c->regs[VCPU_REGS_RDI]);
3457 c->dst.addr.mem.seg = VCPU_SREG_ES;
3461 /* Special instructions do their own operand decoding. */
3463 c->dst.type = OP_NONE; /* Disable writeback. */
3468 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3471 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3473 struct decode_cache *c = &ctxt->decode;
3475 /* The second termination condition applies only to REPE/REPZ
3476  * and REPNE/REPNZ. If the repeat string operation prefix is
3477  * one of these, test the corresponding termination condition:
3478  * - if REPE/REPZ and ZF = 0, then done
3479  * - if REPNE/REPNZ and ZF = 1, then done
3480  */
3482 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3483 (c->b == 0xae) || (c->b == 0xaf))
3484 && (((c->rep_prefix == REPE_PREFIX) &&
3485 ((ctxt->eflags & EFLG_ZF) == 0))
3486 || ((c->rep_prefix == REPNE_PREFIX) &&
3487 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
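/* Opcodes 0xa6/0xa7 are cmps and 0xae/0xaf are scas - the only string
 * instructions that update ZF, hence the opcode test above. */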
3494 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3496 struct x86_emulate_ops *ops = ctxt->ops;
3498 struct decode_cache *c = &ctxt->decode;
3499 int rc = X86EMUL_CONTINUE;
3500 int saved_dst_type = c->dst.type;
3501 int irq; /* Used for int 3, int, and into */
3502 struct desc_ptr desc_ptr;
3504 ctxt->decode.mem_read.pos = 0;
3506 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3507 rc = emulate_ud(ctxt);
3511 /* LOCK prefix is allowed only with some instructions */
3512 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3513 rc = emulate_ud(ctxt);
3517 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3518 rc = emulate_ud(ctxt);
3523 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3524 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3525 rc = emulate_ud(ctxt);
3529 if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3530 rc = emulate_nm(ctxt);
3534 if (unlikely(ctxt->guest_mode) && c->intercept) {
3535 rc = emulator_check_intercept(ctxt, c->intercept,
3536 X86_ICPT_PRE_EXCEPT);
3537 if (rc != X86EMUL_CONTINUE)
3541 /* Privileged instructions can be executed only at CPL == 0 */
3542 if ((c->d & Priv) && ops->cpl(ctxt)) {
3543 rc = emulate_gp(ctxt, 0);
3547 /* Instruction can only be executed in protected mode */
3548 if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3549 rc = emulate_ud(ctxt);
3553 /* Do instruction specific permission checks */
3554 if (c->check_perm) {
3555 rc = c->check_perm(ctxt);
3556 if (rc != X86EMUL_CONTINUE)
3560 if (unlikely(ctxt->guest_mode) && c->intercept) {
3561 rc = emulator_check_intercept(ctxt, c->intercept,
3562 X86_ICPT_POST_EXCEPT);
3563 if (rc != X86EMUL_CONTINUE)
3567 if (c->rep_prefix && (c->d & String)) {
3568 /* All REP prefixes have the same first termination condition */
3569 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3575 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3576 rc = segmented_read(ctxt, c->src.addr.mem,
3577 c->src.valptr, c->src.bytes);
3578 if (rc != X86EMUL_CONTINUE)
3580 c->src.orig_val64 = c->src.val64;
3583 if (c->src2.type == OP_MEM) {
3584 rc = segmented_read(ctxt, c->src2.addr.mem,
3585 &c->src2.val, c->src2.bytes);
3586 if (rc != X86EMUL_CONTINUE)
3590 if ((c->d & DstMask) == ImplicitOps)
3594 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3595 /* optimisation - avoid slow emulated read if Mov */
3596 rc = segmented_read(ctxt, c->dst.addr.mem,
3597 &c->dst.val, c->dst.bytes);
3598 if (rc != X86EMUL_CONTINUE)
3601 c->dst.orig_val = c->dst.val;
3605 if (unlikely(ctxt->guest_mode) && c->intercept) {
3606 rc = emulator_check_intercept(ctxt, c->intercept,
3607 X86_ICPT_POST_MEMACCESS);
3608 if (rc != X86EMUL_CONTINUE)
3613 rc = c->execute(ctxt);
3614 if (rc != X86EMUL_CONTINUE)
3625 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3627 case 0x06: /* push es */
3628 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3630 case 0x07: /* pop es */
3631 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3635 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3637 case 0x0e: /* push cs */
3638 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3642 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3644 case 0x16: /* push ss */
3645 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3647 case 0x17: /* pop ss */
3648 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3652 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3654 case 0x1e: /* push ds */
3655 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3657 case 0x1f: /* pop ds */
3658 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3662 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3666 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3670 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3674 c->dst.type = OP_NONE; /* Disable writeback. */
3675 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3677 case 0x40 ... 0x47: /* inc r16/r32 */
3678 emulate_1op("inc", c->dst, ctxt->eflags);
3680 case 0x48 ... 0x4f: /* dec r16/r32 */
3681 emulate_1op("dec", c->dst, ctxt->eflags);
3683 case 0x58 ... 0x5f: /* pop reg */
3685 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3687 case 0x60: /* pusha */
3688 rc = emulate_pusha(ctxt);
3690 case 0x61: /* popa */
3691 rc = emulate_popa(ctxt, ops);
3693 case 0x63: /* movsxd */
3694 if (ctxt->mode != X86EMUL_MODE_PROT64)
3695 goto cannot_emulate;
3696 c->dst.val = (s32) c->src.val;
3698 case 0x6c: /* insb */
3699 case 0x6d: /* insw/insd */
3700 c->src.val = c->regs[VCPU_REGS_RDX];
3702 case 0x6e: /* outsb */
3703 case 0x6f: /* outsw/outsd */
3704 c->dst.val = c->regs[VCPU_REGS_RDX];
3707 case 0x70 ... 0x7f: /* jcc (short) */
3708 if (test_cc(c->b, ctxt->eflags))
3709 jmp_rel(c, c->src.val);
3711 case 0x80 ... 0x83: /* Grp1 */
3712 switch (c->modrm_reg) {
3733 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3735 case 0x86 ... 0x87: /* xchg */
3737 /* Write back the register source. */
3738 c->src.val = c->dst.val;
3739 write_register_operand(&c->src);
3741 /* Write back the memory destination with implicit LOCK prefix. */
3744 c->dst.val = c->src.orig_val;
3747 case 0x8c: /* mov r/m, sreg */
3748 if (c->modrm_reg > VCPU_SREG_GS) {
3749 rc = emulate_ud(ctxt);
3752 c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg);
3754 case 0x8d: /* lea r16/r32, m */
3755 c->dst.val = c->src.addr.mem.ea;
3757 case 0x8e: { /* mov seg, r/m16 */
3762 if (c->modrm_reg == VCPU_SREG_CS ||
3763 c->modrm_reg > VCPU_SREG_GS) {
3764 rc = emulate_ud(ctxt);
3768 if (c->modrm_reg == VCPU_SREG_SS)
3769 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3771 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3773 c->dst.type = OP_NONE; /* Disable writeback. */
3776 case 0x8f: /* pop (sole member of Grp1a) */
3777 rc = emulate_grp1a(ctxt, ops);
3779 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3780 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3783 case 0x98: /* cbw/cwde/cdqe */
3784 switch (c->op_bytes) {
3785 case 2: c->dst.val = (s8)c->dst.val; break;
3786 case 4: c->dst.val = (s16)c->dst.val; break;
3787 case 8: c->dst.val = (s32)c->dst.val; break;
3790 case 0x9c: /* pushf */
3791 c->src.val = (unsigned long) ctxt->eflags;
3794 case 0x9d: /* popf */
3795 c->dst.type = OP_REG;
3796 c->dst.addr.reg = &ctxt->eflags;
3797 c->dst.bytes = c->op_bytes;
3798 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3800 case 0xa6 ... 0xa7: /* cmps */
3802 case 0xa8 ... 0xa9: /* test ax, imm */
3804 case 0xae ... 0xaf: /* scas */
3809 case 0xc3: /* ret */
3810 c->dst.type = OP_REG;
3811 c->dst.addr.reg = &c->eip;
3812 c->dst.bytes = c->op_bytes;
3813 goto pop_instruction;
3814 case 0xc4: /* les */
3815 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3817 case 0xc5: /* lds */
3818 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3820 case 0xcb: /* ret far */
3821 rc = emulate_ret_far(ctxt, ops);
3823 case 0xcc: /* int3 */
3826 case 0xcd: /* int n */
3829 rc = emulate_int(ctxt, ops, irq);
3831 case 0xce: /* into */
3832 if (ctxt->eflags & EFLG_OF) {
3837 case 0xcf: /* iret */
3838 rc = emulate_iret(ctxt, ops);
3840 case 0xd0 ... 0xd1: /* Grp2 */
3843 case 0xd2 ... 0xd3: /* Grp2 */
3844 c->src.val = c->regs[VCPU_REGS_RCX];
3847 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3848 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3849 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3850 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3851 jmp_rel(c, c->src.val);
3853 case 0xe3: /* jcxz/jecxz/jrcxz */
3854 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3855 jmp_rel(c, c->src.val);
3857 case 0xe4: /* inb */
3860 case 0xe6: /* outb */
3861 case 0xe7: /* out */
3863 case 0xe8: /* call (near) */ {
3864 long int rel = c->src.val;
3865 c->src.val = (unsigned long) c->eip;
3870 case 0xe9: /* jmp rel */
3872 case 0xea: { /* jmp far */
3875 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3877 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3881 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3885 jmp: /* jmp rel short */
3886 jmp_rel(c, c->src.val);
3887 c->dst.type = OP_NONE; /* Disable writeback. */
3889 case 0xec: /* in al,dx */
3890 case 0xed: /* in (e/r)ax,dx */
3891 c->src.val = c->regs[VCPU_REGS_RDX];
3893 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3895 goto done; /* IO is needed */
3897 case 0xee: /* out dx,al */
3898 case 0xef: /* out dx,(e/r)ax */
3899 c->dst.val = c->regs[VCPU_REGS_RDX];
3901 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
3903 c->dst.type = OP_NONE; /* Disable writeback. */
3905 case 0xf4: /* hlt */
3906 ctxt->vcpu->arch.halt_request = 1;
3908 case 0xf5: /* cmc */
3909 /* complement carry flag from eflags reg */
3910 ctxt->eflags ^= EFLG_CF;
3912 case 0xf6 ... 0xf7: /* Grp3 */
3913 rc = emulate_grp3(ctxt, ops);
3915 case 0xf8: /* clc */
3916 ctxt->eflags &= ~EFLG_CF;
3918 case 0xf9: /* stc */
3919 ctxt->eflags |= EFLG_CF;
3921 case 0xfa: /* cli */
3922 if (emulator_bad_iopl(ctxt, ops)) {
3923 rc = emulate_gp(ctxt, 0);
3926 ctxt->eflags &= ~X86_EFLAGS_IF;
3928 case 0xfb: /* sti */
3929 if (emulator_bad_iopl(ctxt, ops)) {
3930 rc = emulate_gp(ctxt, 0);
3933 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3934 ctxt->eflags |= X86_EFLAGS_IF;
3937 case 0xfc: /* cld */
3938 ctxt->eflags &= ~EFLG_DF;
3940 case 0xfd: /* std */
3941 ctxt->eflags |= EFLG_DF;
3943 case 0xfe: /* Grp4 */
3945 rc = emulate_grp45(ctxt);
3947 case 0xff: /* Grp5 */
3948 if (c->modrm_reg == 5)
3952 goto cannot_emulate;
3955 if (rc != X86EMUL_CONTINUE)
3959 rc = writeback(ctxt, ops);
3960 if (rc != X86EMUL_CONTINUE)
3964 * restore dst type in case the decoding will be reused
3965 * (happens for string instructions)
3967 c->dst.type = saved_dst_type;
3969 if ((c->d & SrcMask) == SrcSI)
3970 string_addr_inc(ctxt, seg_override(ctxt, ops, c),
3971 VCPU_REGS_RSI, &c->src);
3973 if ((c->d & DstMask) == DstDI)
3974 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3977 if (c->rep_prefix && (c->d & String)) {
3978 struct read_cache *r = &ctxt->decode.io_read;
3979 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3981 if (!string_insn_completed(ctxt)) {
3983 * Re-enter guest when pio read ahead buffer is empty
3984 * or, if it is not used, after every 1024 iterations.
3986 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3987 (r->end == 0 || r->end != r->pos)) {
3989 * Reset read cache. Usually happens before
3990 * decode, but since the instruction is restarted
3991 * we have to do it here.
3993 ctxt->decode.mem_read.end = 0;
3994 return EMULATION_RESTART;
3996 goto done; /* skip rip writeback */
4003 if (rc == X86EMUL_PROPAGATE_FAULT)
4004 ctxt->have_exception = true;
4005 if (rc == X86EMUL_INTERCEPTED)
4006 return EMULATION_INTERCEPTED;
4008 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4012 case 0x01: /* lgdt, lidt, lmsw */
4013 switch (c->modrm_reg) {
4014 case 0: /* vmcall */
4015 if (c->modrm_mod != 3 || c->modrm_rm != 1)
4016 goto cannot_emulate;
4018 rc = kvm_fix_hypercall(ctxt->vcpu);
4019 if (rc != X86EMUL_CONTINUE)
4022 /* Let the processor re-execute the fixed hypercall */
4024 /* Disable writeback. */
4025 c->dst.type = OP_NONE;
4028 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
4029 &desc_ptr.size, &desc_ptr.address,
4031 if (rc != X86EMUL_CONTINUE)
4033 ctxt->ops->set_gdt(ctxt, &desc_ptr);
4034 /* Disable writeback. */
4035 c->dst.type = OP_NONE;
4037 case 3: /* lidt/vmmcall */
4038 if (c->modrm_mod == 3) {
4039 switch (c->modrm_rm) {
4041 rc = kvm_fix_hypercall(ctxt->vcpu);
4044 goto cannot_emulate;
4047 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
4051 if (rc != X86EMUL_CONTINUE)
4053 ctxt->ops->set_idt(ctxt, &desc_ptr);
4055 /* Disable writeback. */
4056 c->dst.type = OP_NONE;
4060 c->dst.val = ops->get_cr(ctxt, 0);
4063 ops->set_cr(ctxt, 0, (ops->get_cr(ctxt, 0) & ~0x0eul) |
4064 (c->src.val & 0x0f));
4065 c->dst.type = OP_NONE;
4067 case 5: /* not defined */
4069 rc = X86EMUL_PROPAGATE_FAULT;
4072 rc = em_invlpg(ctxt);
4075 goto cannot_emulate;
4078 case 0x05: /* syscall */
4079 rc = emulate_syscall(ctxt, ops);
4082 emulate_clts(ctxt->vcpu);
4084 case 0x09: /* wbinvd */
4085 kvm_emulate_wbinvd(ctxt->vcpu);
4087 case 0x08: /* invd */
4088 case 0x0d: /* GrpP (prefetch) */
4089 case 0x18: /* Grp16 (prefetch/nop) */
4091 case 0x20: /* mov cr, reg */
4092 c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
4094 case 0x21: /* mov from dr to reg */
4095 ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
4097 case 0x22: /* mov reg, cr */
4098 if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
4099 emulate_gp(ctxt, 0);
4100 rc = X86EMUL_PROPAGATE_FAULT;
4103 c->dst.type = OP_NONE;
4105 case 0x23: /* mov from reg to dr */
4106 if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
4107 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4108 ~0ULL : ~0U)) < 0) {
4109 /* #UD condition is already handled by the code above */
4110 emulate_gp(ctxt, 0);
4111 rc = X86EMUL_PROPAGATE_FAULT;
4115 c->dst.type = OP_NONE; /* no writeback */
4119 msr_data = (u32)c->regs[VCPU_REGS_RAX]
4120 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
4121 if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
4122 emulate_gp(ctxt, 0);
4123 rc = X86EMUL_PROPAGATE_FAULT;
4126 rc = X86EMUL_CONTINUE;
4130 if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
4131 emulate_gp(ctxt, 0);
4132 rc = X86EMUL_PROPAGATE_FAULT;
4135 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
4136 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
4138 rc = X86EMUL_CONTINUE;
4140 case 0x34: /* sysenter */
4141 rc = emulate_sysenter(ctxt, ops);
4143 case 0x35: /* sysexit */
4144 rc = emulate_sysexit(ctxt, ops);
4146 case 0x40 ... 0x4f: /* cmov */
4147 c->dst.val = c->dst.orig_val = c->src.val;
4148 if (!test_cc(c->b, ctxt->eflags))
4149 c->dst.type = OP_NONE; /* no writeback */
4151 case 0x80 ... 0x8f: /* jnz rel, etc. */
4152 if (test_cc(c->b, ctxt->eflags))
4153 jmp_rel(c, c->src.val);
4155 case 0x90 ... 0x9f: /* setcc r/m8 */
4156 c->dst.val = test_cc(c->b, ctxt->eflags);
4158 case 0xa0: /* push fs */
4159 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
4161 case 0xa1: /* pop fs */
4162 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
4166 c->dst.type = OP_NONE;
4167 /* only subword offset */
4168 c->src.val &= (c->dst.bytes << 3) - 1;
4169 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
4171 case 0xa4: /* shld imm8, r, r/m */
4172 case 0xa5: /* shld cl, r, r/m */
4173 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
4175 case 0xa8: /* push gs */
4176 rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
4178 case 0xa9: /* pop gs */
4179 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
4183 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
4185 case 0xac: /* shrd imm8, r, r/m */
4186 case 0xad: /* shrd cl, r, r/m */
4187 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
4189 case 0xae: /* clflush */
4191 case 0xb0 ... 0xb1: /* cmpxchg */
4193 * Save real source value, then compare EAX against
4194 * destination.
4195 */
4196 c->src.orig_val = c->src.val;
4197 c->src.val = c->regs[VCPU_REGS_RAX];
4198 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
4199 if (ctxt->eflags & EFLG_ZF) {
4200 /* Success: write back to memory. */
4201 c->dst.val = c->src.orig_val;
4203 /* Failure: write the value we saw to EAX. */
4204 c->dst.type = OP_REG;
4205 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
4208 case 0xb2: /* lss */
4209 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
4213 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
4215 case 0xb4: /* lfs */
4216 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
4218 case 0xb5: /* lgs */
4219 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
4221 case 0xb6 ... 0xb7: /* movzx */
4222 c->dst.bytes = c->op_bytes;
4223 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
4226 case 0xba: /* Grp8 */
4227 switch (c->modrm_reg & 3) {
4240 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
4242 case 0xbc: { /* bsf */
4244 __asm__ ("bsf %2, %0; setz %1"
4245 : "=r"(c->dst.val), "=q"(zf) /* "=q": setz needs a byte register */
4246 : "r"(c->src.val));
4247 ctxt->eflags &= ~X86_EFLAGS_ZF;
4248 if (zf) { /* source was zero: set ZF, leave dst unwritten */
4249 ctxt->eflags |= X86_EFLAGS_ZF;
4250 c->dst.type = OP_NONE; /* Disable writeback. */
4251 }
4254 case 0xbd: { /* bsr */
4256 __asm__ ("bsr %2, %0; setz %1"
4257 : "=r"(c->dst.val), "=q"(zf) /* "=q": setz needs a byte register */
4258 : "r"(c->src.val));
4259 ctxt->eflags &= ~X86_EFLAGS_ZF;
4260 if (zf) { /* source was zero: set ZF, leave dst unwritten */
4261 ctxt->eflags |= X86_EFLAGS_ZF;
4262 c->dst.type = OP_NONE; /* Disable writeback. */
4263 }
4266 case 0xbe ... 0xbf: /* movsx */
4267 c->dst.bytes = c->op_bytes;
4268 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
4271 case 0xc0 ... 0xc1: /* xadd */
4272 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
4273 /* Write back the register source. */
4274 c->src.val = c->dst.orig_val;
4275 write_register_operand(&c->src);
4277 case 0xc3: /* movnti */
4278 c->dst.bytes = c->op_bytes;
4279 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
4282 case 0xc7: /* Grp9 (cmpxchg8b) */
4283 rc = emulate_grp9(ctxt, ops);
4286 goto cannot_emulate;
4289 if (rc != X86EMUL_CONTINUE)
4295 return EMULATION_FAILED;