/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 * Copyright (c) 2005 Keir Fraser
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstAcc (4<<1) /* Destination Accumulator */
#define DstDI (5<<1) /* Destination is in ES:(E)DI */
#define DstMem64 (6<<1) /* 64-bit memory operand */
#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
#define DstMask (7<<1)
/* Source operand type. */
#define SrcNone (0<<4) /* No source operand. */
#define SrcReg (1<<4) /* Register operand. */
#define SrcMem (2<<4) /* Memory operand. */
#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
#define SrcMem32 (4<<4) /* Memory operand (32-bit). */
#define SrcImm (5<<4) /* Immediate operand. */
#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<4) /* Immediate operand, unsigned */
#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
#define SrcAcc (0xd<<4) /* Source Accumulator */
#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
#define SrcMask (0xf<<4)
/* Generic ModRM decode. */
/* Destination is only written; never read. */
#define MemAbs (1<<11) /* Memory operand is absolute displacement */
#define String (1<<12) /* String instruction (rep capable) */
#define Stack (1<<13) /* Stack instruction (push/pop) */
#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
#define Prefix (1<<16) /* Instruction varies with 66/f2/f3 prefix */
#define Sse (1<<17) /* SSE Vector instruction */
#define RMExt (1<<18) /* Opcode extension in ModRM r/m if mod == 3 */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
/* Source 2 operand type */
#define Src2None (0<<29)
#define Src2CL (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One (3<<29)
#define Src2Imm (4<<29)
#define Src2Mask (7<<29)
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
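
/*
 * Illustration (not from the original source): these macros just repeat
 * their argument, so X8(entry) stands for eight identical opcode-table
 * entries, e.g. a row of eight MOV r,imm slots can be written as one
 * X8(...) instead of eight copies. The entry initializer shown is an
 * assumed example; the real table initializers appear elsewhere in the
 * file.
 */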
int (*execute)(struct x86_emulate_ctxt *ctxt);
struct opcode *group;
struct group_dual *gdual;
struct gprefix *gprefix;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);

struct opcode mod012[8];
struct opcode mod3[8];

struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k" /* force 32-bit operand */
#define _STK "%%rsp" /* stack pointer */
#elif defined(__i386__)
#define _LO32 "" /* force 32-bit operand */
#define _STK "%%esp" /* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
"movl %"_sav",%"_LO32 _tmp"; " \
"movl %"_msk",%"_LO32 _tmp"; " \
"andl %"_LO32 _tmp",("_STK"); " \
"notl %"_LO32 _tmp"; " \
"andl %"_LO32 _tmp",("_STK"); " \
"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
"orl %"_LO32 _tmp",("_STK"); " \

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
/* _sav |= EFLAGS & _msk; */ \
"andl %"_msk",%"_LO32 _tmp"; " \
"orl %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "4", "2") \
_op _suffix " %"_x"3,%1; " \
_POST_EFLAGS("0", "4", "2") \
: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
: _y ((_src).val), "i" (EFLAGS_MASK)); \
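
/*
 * Rough sketch of what one expansion does (assuming "add" with 16-bit
 * operands); illustrative, not the literal asm emitted:
 *
 *   splice the guest's EFLAGS_MASK bits into hardware EFLAGS (_PRE_EFLAGS)
 *   addw %src, %dst                        <- the one real instruction
 *   copy the resulting EFLAGS_MASK bits back to the guest   (_POST_EFLAGS)
 *
 * Executing the real instruction on the host is what lets the emulator
 * pick up OF/SF/ZF/AF/PF/CF without computing them by hand.
 */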
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
unsigned long _tmp; \
switch ((_dst).bytes) { \
____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
unsigned long _tmp; \
switch ((_dst).bytes) { \
____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
__emulate_2op_nobyte(_op, _src, _dst, _eflags, \
_wx, _wy, _lx, _ly, _qx, _qy); \

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
__emulate_2op(_op, _src, _dst, _eflags, \
"b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
__emulate_2op(_op, _src, _dst, _eflags, \
"b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
__emulate_2op_nobyte(_op, _src, _dst, _eflags, \
"w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
unsigned long _tmp; \
_type _clv = (_cl).val; \
_type _srcv = (_src).val; \
_type _dstv = (_dst).val; \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "5", "2") \
_op _suffix " %4,%1 \n" \
_POST_EFLAGS("0", "5", "2") \
: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
(_cl).val = (unsigned long) _clv; \
(_src).val = (unsigned long) _srcv; \
(_dst).val = (unsigned long) _dstv; \

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
switch ((_dst).bytes) { \
__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
"w", unsigned short); \
__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
"l", unsigned int); \
ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
"q", unsigned long)); \
#define __emulate_1op(_op, _dst, _eflags, _suffix) \
unsigned long _tmp; \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "3", "2") \
_op _suffix " %1; " \
_POST_EFLAGS("0", "3", "2") \
: "=m" (_eflags), "+m" ((_dst).val), \
: "i" (EFLAGS_MASK)); \

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags) \
switch ((_dst).bytes) { \
case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
unsigned long _tmp; \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "4", "1") \
_op _suffix " %5; " \
_POST_EFLAGS("0", "4", "1") \
: "=m" (_eflags), "=&r" (_tmp), \
"+a" (_rax), "+d" (_rdx) \
: "i" (EFLAGS_MASK), "m" ((_src).val), \
"a" (_rax), "d" (_rdx)); \

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
unsigned long _tmp; \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "5", "1") \
_op _suffix " %6; " \
_POST_EFLAGS("0", "5", "1") \
".pushsection .fixup,\"ax\" \n\t" \
"3: movb $1, %4 \n\t" \
_ASM_EXTABLE(1b, 3b) \
: "=m" (_eflags), "=&r" (_tmp), \
"+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
: "i" (EFLAGS_MASK), "m" ((_src).val), \
"a" (_rax), "d" (_rdx)); \
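
/*
 * Why the _ex variant exists: DIV/IDIV executed here on the host can
 * itself fault (e.g. a guest divisor of zero). The .fixup stub at label
 * 3 sets *_ex to 1, and the _ASM_EXTABLE entry routes a fault on the
 * emulated instruction (label 1) to that stub, so the caller can inject
 * #DE into the guest (see the emulate_de() path in emulate_grp3() below)
 * instead of crashing the host.
 */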
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
switch((_src).bytes) { \
case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
switch((_src).bytes) { \
__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
_eflags, "b", _ex); \
__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
_eflags, "w", _ex); \
__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
_eflags, "l", _ex); \
__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
_eflags, "q", _ex)); \
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip) \
({ unsigned long _x; \
rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
if (rc != X86EMUL_CONTINUE) \

#define insn_fetch_arr(_arr, _size, _eip) \
({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
if (rc != X86EMUL_CONTINUE) \
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->decode.rep_prefix,
.modrm_mod = ctxt->decode.modrm_mod,
.modrm_reg = ctxt->decode.modrm_reg,
.modrm_rm = ctxt->decode.modrm_rm,
.src_val = ctxt->decode.src.val64,
.src_bytes = ctxt->decode.src.bytes,
.dst_bytes = ctxt->decode.dst.bytes,
.ad_bytes = ctxt->decode.ad_bytes,
.next_rip = ctxt->eip,

return ctxt->ops->intercept(ctxt->vcpu, &info, stage);
static inline unsigned long ad_mask(struct decode_cache *c)
return (1UL << (c->ad_bytes << 3)) - 1;
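
/*
 * Example: with 16-bit addressing (ad_bytes == 2) this yields 0xffff,
 * with 32-bit addressing 0xffffffff, so callers can wrap register-held
 * addresses at the width the guest is actually using. The 8-byte case
 * is special-cased by the callers below, since shifting by 64 would be
 * undefined.
 */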
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
if (c->ad_bytes == sizeof(unsigned long))
return reg & ad_mask(c);

static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
return address_mask(c, reg);

register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
if (c->ad_bytes == sizeof(unsigned long))
*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));

static inline void jmp_rel(struct decode_cache *c, int rel)
register_address_increment(c, &c->eip, rel);
static void set_seg_override(struct decode_cache *c, int seg)
c->has_seg_override = true;
c->seg_override = seg;

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int seg)
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return ops->get_cached_segment_base(seg, ctxt->vcpu);

static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct decode_cache *c)
if (!c->has_seg_override)
return c->seg_override;

static ulong linear(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr)
struct decode_cache *c = &ctxt->decode;
la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
if (c->ad_bytes != 8)

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;

static int emulate_db(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, DB_VECTOR, 0, false);

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
return emulate_exception(ctxt, GP_VECTOR, err, true);

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, UD_VECTOR, 0, false);

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
return emulate_exception(ctxt, TS_VECTOR, err, true);

static int emulate_de(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, DE_VECTOR, 0, false);

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false);

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long eip, u8 *dest)
struct fetch_cache *fc = &ctxt->decode.fetch;
if (eip == fc->end) {
cur_size = fc->end - fc->start;
size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
size, ctxt->vcpu, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
*dest = fc->data[eip - fc->start];
return X86EMUL_CONTINUE;

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
/* x86 instructions are limited to 15 bytes. */
if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
if (rc != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
p = &regs[modrm_reg];
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)&regs[modrm_reg & 3] + 1;
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
ctxt->vcpu, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
ctxt->vcpu, &ctxt->exception);

static int test_cc(unsigned int condition, unsigned int flags)
switch ((condition & 15) >> 1) {
rc |= (flags & EFLG_OF);
case 1: /* b/c/nae */
rc |= (flags & EFLG_CF);
rc |= (flags & EFLG_ZF);
rc |= (flags & (EFLG_CF|EFLG_ZF));
rc |= (flags & EFLG_SF);
rc |= (flags & EFLG_PF);
rc |= (flags & EFLG_ZF);
rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
/* Odd condition identifiers (lsb == 1) have inverted sense. */
return (!!rc ^ (condition & 1));
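
/*
 * The condition value is the low nibble of the Jcc/SETcc/CMOVcc opcode.
 * Example: jz/je is opcode 0x74, so (4 & 15) >> 1 selects the ZF test
 * and the clear lsb keeps its sense; jnz/jne (0x75) hits the same test,
 * but the set lsb inverts the result.
 */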
static void fetch_register_operand(struct operand *op)
op->val = *(u8 *)op->addr.reg;
op->val = *(u16 *)op->addr.reg;
op->val = *(u32 *)op->addr.reg;
op->val = *(u64 *)op->addr.reg;

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
ctxt->ops->get_fpu(ctxt);
case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
ctxt->ops->put_fpu(ctxt);

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
ctxt->ops->get_fpu(ctxt);
case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
ctxt->ops->put_fpu(ctxt);

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c,
unsigned reg = c->modrm_reg;
int highbyte_regs = c->rex_prefix == 0;
reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
read_sse_reg(ctxt, &op->vec_val, reg);
if ((c->d & ByteOp) && !inhibit_bytereg) {
op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
op->addr.reg = decode_register(reg, c->regs, 0);
op->bytes = c->op_bytes;
fetch_register_operand(op);
op->orig_val = op->val;

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct decode_cache *c = &ctxt->decode;
int index_reg = 0, base_reg = 0, scale;
int rc = X86EMUL_CONTINUE;
c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
c->modrm = insn_fetch(u8, 1, c->eip);
c->modrm_mod |= (c->modrm & 0xc0) >> 6;
c->modrm_reg |= (c->modrm & 0x38) >> 3;
c->modrm_rm |= (c->modrm & 0x07);
c->modrm_seg = VCPU_SREG_DS;
if (c->modrm_mod == 3) {
op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
op->addr.reg = decode_register(c->modrm_rm,
c->regs, c->d & ByteOp);
op->addr.xmm = c->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
fetch_register_operand(op);
if (c->ad_bytes == 2) {
unsigned bx = c->regs[VCPU_REGS_RBX];
unsigned bp = c->regs[VCPU_REGS_RBP];
unsigned si = c->regs[VCPU_REGS_RSI];
unsigned di = c->regs[VCPU_REGS_RDI];
/* 16-bit ModR/M decode. */
switch (c->modrm_mod) {
if (c->modrm_rm == 6)
modrm_ea += insn_fetch(u16, 2, c->eip);
modrm_ea += insn_fetch(s8, 1, c->eip);
modrm_ea += insn_fetch(u16, 2, c->eip);
switch (c->modrm_rm) {
if (c->modrm_mod != 0)
if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
(c->modrm_rm == 6 && c->modrm_mod != 0))
c->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
/* 32/64-bit ModR/M decode. */
if ((c->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, 1, c->eip);
index_reg |= (sib >> 3) & 7;
if ((base_reg & 7) == 5 && c->modrm_mod == 0)
modrm_ea += insn_fetch(s32, 4, c->eip);
modrm_ea += c->regs[base_reg];
modrm_ea += c->regs[index_reg] << scale;
} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
modrm_ea += c->regs[c->modrm_rm];
switch (c->modrm_mod) {
if (c->modrm_rm == 5)
modrm_ea += insn_fetch(s32, 4, c->eip);
modrm_ea += insn_fetch(s8, 1, c->eip);
modrm_ea += insn_fetch(s32, 4, c->eip);
op->addr.mem.ea = modrm_ea;

static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
switch (c->ad_bytes) {
op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
op->addr.mem.ea = insn_fetch(u64, 8, c->eip);

static void fetch_bit_operand(struct decode_cache *c)
if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
mask = ~(c->dst.bytes * 8 - 1);
if (c->src.bytes == 2)
sv = (s16)c->src.val & (s16)mask;
else if (c->src.bytes == 4)
sv = (s32)c->src.val & (s32)mask;
c->dst.addr.mem.ea += (sv >> 3);
/* only subword offset */
c->src.val &= (c->dst.bytes << 3) - 1;
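
/*
 * Worked example (register-source path): BT with a 32-bit memory operand
 * and a source register holding 35. mask is ~31, so sv = 35 & ~31 = 32;
 * the effective address moves forward by 32 >> 3 = 4 bytes, and the
 * remaining in-word bit offset is 35 & 31 = 3. This is how BT/BTS/BTR/BTC
 * reach bit offsets beyond the operand width.
 */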
static int read_emulated(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long addr, void *dest, unsigned size)
struct read_cache *mc = &ctxt->decode.mem_read;
int n = min(size, 8u);
if (mc->pos < mc->end)
rc = ops->read_emulated(addr, mc->data + mc->end, n,
&ctxt->exception, ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
memcpy(dest, mc->data + mc->pos, n);
return X86EMUL_CONTINUE;

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned int size, unsigned short port,
struct read_cache *rc = &ctxt->decode.io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
struct decode_cache *c = &ctxt->decode;
unsigned int in_page, n;
unsigned int count = c->rep_prefix ?
address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
in_page = (ctxt->eflags & EFLG_DF) ?
offset_in_page(c->regs[VCPU_REGS_RDI]) :
PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
rc->pos = rc->end = 0;
if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
memcpy(dest, rc->data + rc->pos, size);

static u32 desc_limit_scaled(struct desc_struct *desc)
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
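
/*
 * Example: a descriptor with g == 1 and limit field 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. the 4GB flat limit that
 * the syscall/sysenter segment setup below relies on.
 */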
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 selector, struct desc_ptr *dt)
if (selector & 1 << 2) {
struct desc_struct desc;
memset(dt, 0, sizeof *dt);
if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc);
ops->get_gdt(dt, ctxt->vcpu);
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
u16 index = selector >> 3;
get_descriptor_table_ptr(ctxt, ops, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
u16 index = selector >> 3;
get_descriptor_table_ptr(ctxt, ops, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 selector, int seg)
struct desc_struct seg_desc;
unsigned err_vec = GP_VECTOR;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
memset(&seg_desc, 0, sizeof seg_desc);
if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
|| ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
/* NULL selector is not valid for TR, CS and SS */
if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
if (null_selector) /* for NULL selector skip all following checks */
ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
err_code = selector & 0xfffc;
err_vec = GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s)
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
cpl = ops->cpl(ctxt->vcpu);
/*
 * segment is not a writable data segment, or the segment
 * selector's RPL != CPL, or the descriptor's DPL != CPL
 */
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
if (!(seg_desc.type & 8))
if (seg_desc.type & 4) {
if (rpl > cpl || dpl != cpl)
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
default: /* DS, ES, FS, or GS */
/*
 * segment is not a data or readable code segment or
 * ((segment is a data or nonconforming code segment)
 * and (both RPL and CPL > DPL))
 */
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
/* mark segment as accessed */
ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
ops->set_segment_selector(selector, seg, ctxt->vcpu);
ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
return X86EMUL_CONTINUE;
emulate_exception(ctxt, err_vec, err_code, true);
return X86EMUL_PROPAGATE_FAULT;
static void write_register_operand(struct operand *op)
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (op->bytes) {
*(u8 *)op->addr.reg = (u8)op->val;
*(u16 *)op->addr.reg = (u16)op->val;
*op->addr.reg = (u32)op->val;
break; /* 64b: zero-extend */
*op->addr.reg = op->val;
static inline int writeback(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
switch (c->dst.type) {
write_register_operand(&c->dst);
rc = ops->cmpxchg_emulated(
linear(ctxt, c->dst.addr.mem),
rc = ops->write_emulated(
linear(ctxt, c->dst.addr.mem),
if (rc != X86EMUL_CONTINUE)
write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
return X86EMUL_CONTINUE;

static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
c->dst.type = OP_MEM;
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
c->dst.addr.mem.seg = VCPU_SREG_SS;

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
void *dest, int len)
struct decode_cache *c = &ctxt->decode;
struct segmented_address addr;
addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
if (rc != X86EMUL_CONTINUE)
register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
void *dest, int len)
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
int cpl = ops->cpl(ctxt->vcpu);
rc = emulate_pop(ctxt, ops, &val, len);
if (rc != X86EMUL_CONTINUE)
change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
change_mask |= EFLG_IOPL;
change_mask |= EFLG_IF;
case X86EMUL_MODE_VM86:
return emulate_gp(ctxt, 0);
change_mask |= EFLG_IF;
default: /* real mode */
change_mask |= (EFLG_IOPL | EFLG_IF);
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int seg)
struct decode_cache *c = &ctxt->decode;
c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
emulate_push(ctxt, ops);

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int seg)
struct decode_cache *c = &ctxt->decode;
unsigned long selector;
rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);

static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
unsigned long old_esp = c->regs[VCPU_REGS_RSP];
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
(c->src.val = old_esp) : (c->src.val = c->regs[reg]);
emulate_push(ctxt, ops);
rc = writeback(ctxt, ops);
if (rc != X86EMUL_CONTINUE)
/* Disable writeback. */
c->dst.type = OP_NONE;

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
register_address_increment(c, &c->regs[VCPU_REGS_RSP],
rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
if (rc != X86EMUL_CONTINUE)

int emulate_int_real(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int irq)
struct decode_cache *c = &ctxt->decode;
/* TODO: Add limit checks */
c->src.val = ctxt->eflags;
emulate_push(ctxt, ops);
rc = writeback(ctxt, ops);
if (rc != X86EMUL_CONTINUE)
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
emulate_push(ctxt, ops);
rc = writeback(ctxt, ops);
if (rc != X86EMUL_CONTINUE)
c->src.val = c->eip;
emulate_push(ctxt, ops);
rc = writeback(ctxt, ops);
if (rc != X86EMUL_CONTINUE)
c->dst.type = OP_NONE;
ops->get_idt(&dt, ctxt->vcpu);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)

static int emulate_int(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int irq)
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_int_real(ctxt, ops, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
/* Protected-mode interrupts are not implemented yet */
return X86EMUL_UNHANDLEABLE;

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
if (c->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (c->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt, ops);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
/* iret from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
struct decode_cache *c = &ctxt->decode;
switch (c->modrm_reg) {
emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
case 4: /* sal/shl */
case 6: /* sal/shl */
emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
unsigned long *rax = &c->regs[VCPU_REGS_RAX];
unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
switch (c->modrm_reg) {
case 0 ... 1: /* test */
emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
c->dst.val = ~c->dst.val;
emulate_1op("neg", c->dst, ctxt->eflags);
emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
return X86EMUL_UNHANDLEABLE;
return emulate_de(ctxt);
return X86EMUL_CONTINUE;

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
switch (c->modrm_reg) {
emulate_1op("inc", c->dst, ctxt->eflags);
emulate_1op("dec", c->dst, ctxt->eflags);
case 2: /* call near abs */ {
c->eip = c->src.val;
c->src.val = old_eip;
emulate_push(ctxt, ops);
case 4: /* jmp abs */
c->eip = c->src.val;
emulate_push(ctxt, ops);
return X86EMUL_CONTINUE;

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
u64 old = c->dst.orig_val64;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
(u32) c->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
return X86EMUL_CONTINUE;
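
/*
 * The group-9 handler above is CMPXCHG8B: compare EDX:EAX with the
 * 64-bit destination; on a match, set ZF and write ECX:EBX back through
 * c->dst, otherwise clear ZF and load the old value into EDX:EAX. The
 * store itself is performed later by writeback().
 */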
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
if (c->op_bytes == 4)
c->eip = (u32)c->eip;
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int seg)
struct decode_cache *c = &ctxt->decode;
memcpy(&sel, c->src.valptr + c->op_bytes, 2);
rc = load_segment_descriptor(ctxt, ops, sel, seg);
if (rc != X86EMUL_CONTINUE)
c->dst.val = c->src.val;

setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, struct desc_struct *cs,
struct desc_struct *ss)
memset(cs, 0, sizeof(struct desc_struct));
ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
memset(ss, 0, sizeof(struct desc_struct));
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->dpl = 0; /* will be adjusted later */
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */

emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
struct desc_struct cs, ss;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
setup_syscalls_segments(ctxt, ops, &cs, &ss);
ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (is_long_mode(ctxt->vcpu)) {
ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
c->regs[VCPU_REGS_RCX] = c->eip;
if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
ops->get_msr(ctxt->vcpu,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~(msr_data | EFLG_RF);
ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
c->eip = (u32)msr_data;
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
return X86EMUL_CONTINUE;

emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
struct desc_struct cs, ss;
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/* XXX sysenter/sysexit have not been tested in 64-bit mode.
 * Therefore, we inject an #UD.
 */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_ud(ctxt);
setup_syscalls_segments(ctxt, ops, &cs, &ss);
ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) {
case X86EMUL_MODE_PROT32:
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
case X86EMUL_MODE_PROT64:
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
cs_sel = (u16)msr_data;
cs_sel &= ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
ss_sel &= ~SELECTOR_RPL_MASK;
if (ctxt->mode == X86EMUL_MODE_PROT64
|| is_long_mode(ctxt->vcpu)) {
ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
c->regs[VCPU_REGS_RSP] = msr_data;
return X86EMUL_CONTINUE;

emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
struct desc_struct cs, ss;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, ops, &cs, &ss);
if ((c->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
usermode = X86EMUL_MODE_PROT32;
ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs_sel |= SELECTOR_RPL_MASK;
ss_sel |= SELECTOR_RPL_MASK;
ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
c->eip = c->regs[VCPU_REGS_RDX];
c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
return X86EMUL_CONTINUE;

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
if (ctxt->mode == X86EMUL_MODE_REAL)
if (ctxt->mode == X86EMUL_MODE_VM86)
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
return ops->cpl(ctxt->vcpu) > iopl;

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct desc_struct tr_seg;
u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
if (desc_limit_scaled(&tr_seg) < 103)
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
if (r != X86EMUL_CONTINUE)
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
if (r != X86EMUL_CONTINUE)
if ((perm >> bit_idx) & mask)

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
if (emulator_bad_iopl(ctxt, ops))
if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
ctxt->perm_ok = true;

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
struct decode_cache *c = &ctxt->decode;
tss->flag = ctxt->eflags;
tss->ax = c->regs[VCPU_REGS_RAX];
tss->cx = c->regs[VCPU_REGS_RCX];
tss->dx = c->regs[VCPU_REGS_RDX];
tss->bx = c->regs[VCPU_REGS_RBX];
tss->sp = c->regs[VCPU_REGS_RSP];
tss->bp = c->regs[VCPU_REGS_RBP];
tss->si = c->regs[VCPU_REGS_RSI];
tss->di = c->regs[VCPU_REGS_RDI];
tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
struct decode_cache *c = &ctxt->decode;
ctxt->eflags = tss->flag | 2;
c->regs[VCPU_REGS_RAX] = tss->ax;
c->regs[VCPU_REGS_RCX] = tss->cx;
c->regs[VCPU_REGS_RDX] = tss->dx;
c->regs[VCPU_REGS_RBX] = tss->bx;
c->regs[VCPU_REGS_RSP] = tss->sp;
c->regs[VCPU_REGS_RBP] = tss->bp;
c->regs[VCPU_REGS_RSI] = tss->si;
c->regs[VCPU_REGS_RDI] = tss->di;
/*
 * The SDM says that segment selectors are loaded before segment
 * descriptors.
 */
ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
/*
 * Now load segment descriptors. If a fault happens at this stage,
 * it is handled in the context of the new task.
 */
ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
struct tss_segment_16 tss_seg;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
save_state_to_tss16(ctxt, ops, &tss_seg);
ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
ctxt->vcpu, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return load_state_from_tss16(ctxt, ops, &tss_seg);

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
struct decode_cache *c = &ctxt->decode;
tss->cr3 = ops->get_cr(3, ctxt->vcpu);
tss->eflags = ctxt->eflags;
tss->eax = c->regs[VCPU_REGS_RAX];
tss->ecx = c->regs[VCPU_REGS_RCX];
tss->edx = c->regs[VCPU_REGS_RDX];
tss->ebx = c->regs[VCPU_REGS_RBX];
tss->esp = c->regs[VCPU_REGS_RSP];
tss->ebp = c->regs[VCPU_REGS_RBP];
tss->esi = c->regs[VCPU_REGS_RSI];
tss->edi = c->regs[VCPU_REGS_RDI];
tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
struct decode_cache *c = &ctxt->decode;
if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
return emulate_gp(ctxt, 0);
ctxt->eflags = tss->eflags | 2;
c->regs[VCPU_REGS_RAX] = tss->eax;
c->regs[VCPU_REGS_RCX] = tss->ecx;
c->regs[VCPU_REGS_RDX] = tss->edx;
c->regs[VCPU_REGS_RBX] = tss->ebx;
c->regs[VCPU_REGS_RSP] = tss->esp;
c->regs[VCPU_REGS_RBP] = tss->ebp;
c->regs[VCPU_REGS_RSI] = tss->esi;
c->regs[VCPU_REGS_RDI] = tss->edi;
/*
 * The SDM says that segment selectors are loaded before segment
 * descriptors.
 */
ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
/*
 * Now load segment descriptors. If a fault happens at this stage,
 * it is handled in the context of the new task.
 */
ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
if (ret != X86EMUL_CONTINUE)
ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
if (ret != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
struct tss_segment_32 tss_seg;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
save_state_to_tss32(ctxt, ops, &tss_seg);
ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
ctxt->vcpu, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return load_state_from_tss32(ctxt, ops, &tss_seg);

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
struct desc_struct curr_tss_desc, next_tss_desc;
u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
ulong old_tss_base =
ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
if (ret != X86EMUL_CONTINUE)
/* FIXME: check that next_tss_desc is tss */
if (reason != TASK_SWITCH_IRET) {
if ((tss_selector & 3) > next_tss_desc.dpl ||
ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
return emulate_gp(ctxt, 0);
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
emulate_ts(ctxt, tss_selector & 0xfffc);
return X86EMUL_PROPAGATE_FAULT;
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, ops, old_tss_sel,
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/* Set the back link to the previous task only if the NT bit is set in
   EFLAGS; note that old_tss_sel is not used after this point. */
2233 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2234 old_tss_sel = 0xffff;
2236 if (next_tss_desc.type & 8)
2237 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2238 old_tss_base, &next_tss_desc);
2240 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2241 old_tss_base, &next_tss_desc);
2242 if (ret != X86EMUL_CONTINUE)
2245 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2246 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2248 if (reason != TASK_SWITCH_IRET) {
2249 next_tss_desc.type |= (1 << 1); /* set busy flag */
2250 write_segment_descriptor(ctxt, ops, tss_selector,
2254 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2255 ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
2256 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2258 if (has_error_code) {
2259 struct decode_cache *c = &ctxt->decode;
2261 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2263 c->src.val = (unsigned long) error_code;
2264 emulate_push(ctxt, ops);
2270 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2271 u16 tss_selector, int reason,
2272 bool has_error_code, u32 error_code)
2274 struct x86_emulate_ops *ops = ctxt->ops;
2275 struct decode_cache *c = &ctxt->decode;
2279 c->dst.type = OP_NONE;
2281 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2282 has_error_code, error_code);
2284 if (rc == X86EMUL_CONTINUE) {
2285 rc = writeback(ctxt, ops);
2286 if (rc == X86EMUL_CONTINUE)
2290 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2293 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2294 int reg, struct operand *op)
2296 struct decode_cache *c = &ctxt->decode;
2297 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2299 register_address_increment(c, &c->regs[reg], df * op->bytes);
2300 op->addr.mem.ea = register_address(c, c->regs[reg]);
2301 op->addr.mem.seg = seg;
2304 static int em_push(struct x86_emulate_ctxt *ctxt)
2306 emulate_push(ctxt, ctxt->ops);
2307 return X86EMUL_CONTINUE;
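/*
 * em_das() below follows the SDM pseudocode for DAS (decimal adjust
 * AL after subtraction): if the low nibble of AL exceeds 9 or AF is
 * set, subtract 6 from AL, set AF and fold any borrow into CF; if the
 * original AL exceeded 0x99 or CF was set, subtract 0x60 from AL and
 * set CF.  The trailing OR exists only to recompute PF, ZF and SF.
 */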
2310 static int em_das(struct x86_emulate_ctxt *ctxt)
2312 struct decode_cache *c = &ctxt->decode;
2314 bool af, cf, old_cf;
2316 cf = ctxt->eflags & X86_EFLAGS_CF;
2322 af = ctxt->eflags & X86_EFLAGS_AF;
2323 if ((al & 0x0f) > 9 || af) {
2325 cf = old_cf | (al >= 250);
2330 if (old_al > 0x99 || old_cf) {
2336 /* Set PF, ZF, SF */
2337 c->src.type = OP_IMM;
2340 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2341 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2343 ctxt->eflags |= X86_EFLAGS_CF;
2345 ctxt->eflags |= X86_EFLAGS_AF;
2346 return X86EMUL_CONTINUE;
2349 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2351 struct decode_cache *c = &ctxt->decode;
2356 old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2359 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2360 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2361 return X86EMUL_CONTINUE;
2364 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2366 c->src.val = old_cs;
2367 emulate_push(ctxt, ctxt->ops);
2368 rc = writeback(ctxt, ctxt->ops);
2369 if (rc != X86EMUL_CONTINUE)
2372 c->src.val = old_eip;
2373 emulate_push(ctxt, ctxt->ops);
2374 rc = writeback(ctxt, ctxt->ops);
2375 if (rc != X86EMUL_CONTINUE)
2378 c->dst.type = OP_NONE;
2380 return X86EMUL_CONTINUE;
2383 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2385 struct decode_cache *c = &ctxt->decode;
2388 c->dst.type = OP_REG;
2389 c->dst.addr.reg = &c->eip;
2390 c->dst.bytes = c->op_bytes;
2391 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2392 if (rc != X86EMUL_CONTINUE)
2394 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2395 return X86EMUL_CONTINUE;
2398 static int em_imul(struct x86_emulate_ctxt *ctxt)
2400 struct decode_cache *c = &ctxt->decode;
2402 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2403 return X86EMUL_CONTINUE;
2406 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2408 struct decode_cache *c = &ctxt->decode;
2410 c->dst.val = c->src2.val;
2411 return em_imul(ctxt);
2414 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2416 struct decode_cache *c = &ctxt->decode;
2418 c->dst.type = OP_REG;
2419 c->dst.bytes = c->src.bytes;
2420 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2421 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
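/*
 * This replicates the sign bit of the source into the destination:
 * the shift leaves just the sign bit, so ~(1 - 1) is all ones and
 * ~(0 - 1) is zero.  E.g. cwd with AX = 0x8000 yields DX = 0xffff.
 */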
2423 return X86EMUL_CONTINUE;
2426 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2428 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2429 struct decode_cache *c = &ctxt->decode;
2432 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
2433 return emulate_gp(ctxt, 0);
2434 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2435 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2436 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2437 return X86EMUL_CONTINUE;
2440 static int em_mov(struct x86_emulate_ctxt *ctxt)
2442 struct decode_cache *c = &ctxt->decode;
2443 c->dst.val = c->src.val;
2444 return X86EMUL_CONTINUE;
2447 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2449 struct decode_cache *c = &ctxt->decode;
2450 memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
2451 return X86EMUL_CONTINUE;
2454 static bool valid_cr(int nr)
2466 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2468 struct decode_cache *c = &ctxt->decode;
2470 if (!valid_cr(c->modrm_reg))
2471 return emulate_ud(ctxt);
2473 return X86EMUL_CONTINUE;
2476 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2478 struct decode_cache *c = &ctxt->decode;
2479 u64 new_val = c->src.val64;
2480 int cr = c->modrm_reg;
2482 static u64 cr_reserved_bits[] = {
2483 0xffffffff00000000ULL,
2484 0, 0, 0, /* CR3 checked later */
2491 return emulate_ud(ctxt);
2493 if (new_val & cr_reserved_bits[cr])
2494 return emulate_gp(ctxt, 0);
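/*
 * Per-register checks follow: CR0 rejects PG without PE and NW
 * without CD, and with EFER.LME setting PG requires CR4.PAE; CR3 is
 * tested against mode-dependent reserved bits; CR4.PAE cannot be
 * cleared while EFER.LMA is set.
 */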
2499 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2500 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2501 return emulate_gp(ctxt, 0);
2503 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
2504 ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
2506 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2507 !(cr4 & X86_CR4_PAE))
2508 return emulate_gp(ctxt, 0);
2515 if (is_long_mode(ctxt->vcpu))
2516 rsvd = CR3_L_MODE_RESERVED_BITS;
2517 else if (is_pae(ctxt->vcpu))
2518 rsvd = CR3_PAE_RESERVED_BITS;
2519 else if (is_paging(ctxt->vcpu))
2520 rsvd = CR3_NONPAE_RESERVED_BITS;
2523 return emulate_gp(ctxt, 0);
2530 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
2531 ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
2533 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2534 return emulate_gp(ctxt, 0);
2540 return X86EMUL_CONTINUE;
2543 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2547 ctxt->ops->get_dr(7, &dr7, ctxt->vcpu);
2549 /* Check if DR7.GD (general detect) is set */
2550 return dr7 & (1 << 13);
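/*
 * With DR7.GD set, the processor raises #DB on any debug-register
 * access; the read/write checks below turn that into emulate_db().
 */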
2553 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2555 struct decode_cache *c = &ctxt->decode;
2556 int dr = c->modrm_reg;
2560 return emulate_ud(ctxt);
2562 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
2563 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2564 return emulate_ud(ctxt);
2566 if (check_dr7_gd(ctxt))
2567 return emulate_db(ctxt);
2569 return X86EMUL_CONTINUE;
2572 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2574 struct decode_cache *c = &ctxt->decode;
2575 u64 new_val = c->src.val64;
2576 int dr = c->modrm_reg;
2578 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2579 return emulate_gp(ctxt, 0);
2581 return check_dr_read(ctxt);
2584 static int check_svme(struct x86_emulate_ctxt *ctxt)
2588 ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
2590 if (!(efer & EFER_SVME))
2591 return emulate_ud(ctxt);
2593 return X86EMUL_CONTINUE;
2596 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2598 u64 rax = kvm_register_read(ctxt->vcpu, VCPU_REGS_RAX);
2600 /* Valid physical address? */
2601 if (rax & 0xffff000000000000ULL)
2602 return emulate_gp(ctxt, 0);
2604 return check_svme(ctxt);
2607 #define D(_y) { .flags = (_y) }
2608 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2609 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2610 .check_perm = (_p) }
2612 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2613 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2614 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2615 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2616 #define II(_f, _e, _i) \
2617 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2618 #define IIP(_f, _e, _i, _p) \
2619 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
2620 .check_perm = (_p) }
2621 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2623 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2624 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2626 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2627 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2628 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
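/*
 * Shorthands for building the tables: D2bv()/I2bv() emit a ByteOp
 * entry followed by its word/dword/qword twin, and D6ALU() expands to
 * the six classic ALU encodings (r/m,reg; reg,r/m; acc,imm), dropping
 * Lock where the destination cannot be memory.  For example,
 * I2bv(DstMem | SrcReg | ModRM | Mov, em_mov) is equivalent to
 *   I(ByteOp | DstMem | SrcReg | ModRM | Mov, em_mov),
 *   I(DstMem | SrcReg | ModRM | Mov, em_mov)
 */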
2630 static struct opcode group7_rm3[] = {
2631 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
2632 DIP(SrcNone | ModRM | Prot , vmmcall, check_svme),
2633 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
2634 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
2635 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
2636 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
2637 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
2638 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
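/*
 * group 7, mod == 3: the RMExt flag makes the ModRM r/m field select
 * among the SVM instructions above; all require EFER.SVME, and
 * vmrun/vmload/vmsave additionally require the upper 16 bits of RAX
 * to be clear (see check_svme_pa() above).
 */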
2641 static struct opcode group1[] = {
2645 static struct opcode group1A[] = {
2646 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2649 static struct opcode group3[] = {
2650 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2651 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2652 X4(D(SrcMem | ModRM)),
2655 static struct opcode group4[] = {
2656 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2660 static struct opcode group5[] = {
2661 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2662 D(SrcMem | ModRM | Stack),
2663 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2664 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2665 D(SrcMem | ModRM | Stack), N,
2668 static struct opcode group6[] = {
2669 DI(ModRM | Prot, sldt),
2670 DI(ModRM | Prot, str),
2671 DI(ModRM | Prot | Priv, lldt),
2672 DI(ModRM | Prot | Priv, ltr),
2676 static struct group_dual group7 = { {
2677 DI(ModRM | Mov | DstMem | Priv, sgdt),
2678 DI(ModRM | Mov | DstMem | Priv, sidt),
2679 DI(ModRM | SrcMem | Priv, lgdt), DI(ModRM | SrcMem | Priv, lidt),
2680 DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
2681 DI(SrcMem16 | ModRM | Mov | Priv, lmsw),
2682 DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
2684 D(SrcNone | ModRM | Priv | VendorSpecific), N,
2685 N, EXT(0, group7_rm3),
2686 DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
2687 DI(SrcMem16 | ModRM | Mov | Priv, lmsw), N,
2690 static struct opcode group8[] = {
2692 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2693 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2696 static struct group_dual group9 = { {
2697 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2699 N, N, N, N, N, N, N, N,
2702 static struct opcode group11[] = {
2703 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2706 static struct gprefix pfx_0f_6f_0f_7f = {
2707 N, N, N, I(Sse, em_movdqu),
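/*
 * A gprefix table picks the final opcode by mandatory SIMD prefix
 * (none, 66, f2, f3); for 0f 6f/0f 7f only the f3 form, movdqu, is
 * emulated here.
 */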
2710 static struct opcode opcode_table[256] = {
2713 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2716 D(ImplicitOps | Stack | No64), N,
2719 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2722 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2726 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2734 X8(I(SrcReg | Stack, em_push)),
2736 X8(D(DstReg | Stack)),
2738 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2739 N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86/64) */
2742 I(SrcImm | Mov | Stack, em_push),
2743 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2744 I(SrcImmByte | Mov | Stack, em_push),
2745 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2746 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2747 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2751 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2752 G(DstMem | SrcImm | ModRM | Group, group1),
2753 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2754 G(DstMem | SrcImmByte | ModRM | Group, group1),
2755 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2757 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2758 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2759 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2760 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2762 X8(D(SrcAcc | DstReg)),
2764 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2765 I(SrcImmFAddr | No64, em_call_far), N,
2766 DI(ImplicitOps | Stack, pushf), DI(ImplicitOps | Stack, popf), N, N,
2768 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2769 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2770 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2771 D2bv(SrcSI | DstDI | String),
2773 D2bv(DstAcc | SrcImm),
2774 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2775 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2776 D2bv(SrcAcc | DstDI | String),
2778 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2780 X8(I(DstReg | SrcImm | Mov, em_mov)),
2782 D2bv(DstMem | SrcImmByte | ModRM),
2783 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2784 D(ImplicitOps | Stack),
2785 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2786 G(ByteOp, group11), G(0, group11),
2788 N, N, N, D(ImplicitOps | Stack),
2789 D(ImplicitOps), DI(SrcImmByte, intn),
2790 D(ImplicitOps | No64), DI(ImplicitOps, iret),
2792 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2795 N, N, N, N, N, N, N, N,
2798 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2800 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2801 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2802 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2805 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
2806 G(ByteOp, group3), G(0, group3),
2808 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2809 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2812 static struct opcode twobyte_table[256] = {
2814 G(0, group6), GD(0, &group7), N, N,
2815 N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
2816 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
2817 N, D(ImplicitOps | ModRM), N, N,
2819 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2821 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
2822 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
2823 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
2824 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
2826 N, N, N, N, N, N, N, N,
2828 D(ImplicitOps | Priv), II(ImplicitOps, em_rdtsc, rdtsc),
2829 D(ImplicitOps | Priv), N,
2830 D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
2832 N, N, N, N, N, N, N, N,
2834 X16(D(DstReg | SrcMem | ModRM | Mov)),
2836 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2841 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
2846 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
2850 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
2852 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2853 N, D(DstMem | SrcReg | ModRM | BitOp),
2854 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2855 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2857 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2858 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2859 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2860 D(DstMem | SrcReg | Src2CL | ModRM),
2861 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2863 D2bv(DstMem | SrcReg | ModRM | Lock),
2864 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2865 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2866 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2869 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2870 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2871 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2873 D2bv(DstMem | SrcReg | ModRM | Lock),
2874 N, D(DstMem | SrcReg | ModRM | Mov),
2875 N, N, N, GD(0, &group9),
2876 N, N, N, N, N, N, N, N,
2878 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2880 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2882 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2897 static unsigned imm_size(struct decode_cache *c)
2901 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2907 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2908 unsigned size, bool sign_extension)
2910 struct decode_cache *c = &ctxt->decode;
2911 struct x86_emulate_ops *ops = ctxt->ops;
2912 int rc = X86EMUL_CONTINUE;
2916 op->addr.mem.ea = c->eip;
2917 /* NB. Immediates are sign-extended as necessary. */
2918 switch (op->bytes) {
2920 op->val = insn_fetch(s8, 1, c->eip);
2923 op->val = insn_fetch(s16, 2, c->eip);
2926 op->val = insn_fetch(s32, 4, c->eip);
2929 if (!sign_extension) {
2930 switch (op->bytes) {
2938 op->val &= 0xffffffff;
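/*
 * x86_decode_insn() runs in stages: legacy and REX prefixes, one or
 * two opcode bytes, indirection through the Group/GroupDual/RMExt/
 * Prefix tables keyed off ModRM, then source, second-source and
 * destination operand decode driven by the flags collected in c->d.
 */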
2947 x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
2949 struct x86_emulate_ops *ops = ctxt->ops;
2950 struct decode_cache *c = &ctxt->decode;
2951 int rc = X86EMUL_CONTINUE;
2952 int mode = ctxt->mode;
2953 int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
2954 bool op_prefix = false;
2955 struct opcode opcode, *g_mod012, *g_mod3;
2956 struct operand memop = { .type = OP_NONE };
2959 c->fetch.start = c->eip;
2960 c->fetch.end = c->fetch.start + insn_len;
2962 memcpy(c->fetch.data, insn, insn_len);
2963 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2966 case X86EMUL_MODE_REAL:
2967 case X86EMUL_MODE_VM86:
2968 case X86EMUL_MODE_PROT16:
2969 def_op_bytes = def_ad_bytes = 2;
2971 case X86EMUL_MODE_PROT32:
2972 def_op_bytes = def_ad_bytes = 4;
2974 #ifdef CONFIG_X86_64
2975 case X86EMUL_MODE_PROT64:
2984 c->op_bytes = def_op_bytes;
2985 c->ad_bytes = def_ad_bytes;
2987 /* Legacy prefixes. */
2989 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2990 case 0x66: /* operand-size override */
2992 /* switch between 2/4 bytes */
2993 c->op_bytes = def_op_bytes ^ 6;
2995 case 0x67: /* address-size override */
2996 if (mode == X86EMUL_MODE_PROT64)
2997 /* switch between 4/8 bytes */
2998 c->ad_bytes = def_ad_bytes ^ 12;
3000 /* switch between 2/4 bytes */
3001 c->ad_bytes = def_ad_bytes ^ 6;
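/*
 * The XOR flips between the two legal sizes: 2 ^ 6 = 4 and 4 ^ 6 = 2
 * for legacy code, 4 ^ 12 = 8 and 8 ^ 12 = 4 in long mode.
 */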
3003 case 0x26: /* ES override */
3004 case 0x2e: /* CS override */
3005 case 0x36: /* SS override */
3006 case 0x3e: /* DS override */
3007 set_seg_override(c, (c->b >> 3) & 3);
3009 case 0x64: /* FS override */
3010 case 0x65: /* GS override */
3011 set_seg_override(c, c->b & 7);
3013 case 0x40 ... 0x4f: /* REX */
3014 if (mode != X86EMUL_MODE_PROT64)
3016 c->rex_prefix = c->b;
3018 case 0xf0: /* LOCK */
3021 case 0xf2: /* REPNE/REPNZ */
3022 case 0xf3: /* REP/REPE/REPZ */
3023 c->rep_prefix = c->b;
3029 /* Any legacy prefix after a REX prefix nullifies its effect. */
3037 if (c->rex_prefix & 8)
3038 c->op_bytes = 8; /* REX.W */
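/* REX.W selects 64-bit operands, overriding any 0x66 prefix. */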
3040 /* Opcode byte(s). */
3041 opcode = opcode_table[c->b];
3042 /* Two-byte opcode? */
3045 c->b = insn_fetch(u8, 1, c->eip);
3046 opcode = twobyte_table[c->b];
3048 c->d = opcode.flags;
3051 dual = c->d & GroupDual;
3052 c->modrm = insn_fetch(u8, 1, c->eip);
3055 if (c->d & GroupDual) {
3056 g_mod012 = opcode.u.gdual->mod012;
3057 g_mod3 = opcode.u.gdual->mod3;
3059 g_mod012 = g_mod3 = opcode.u.group;
3061 c->d &= ~(Group | GroupDual);
3063 goffset = (c->modrm >> 3) & 7;
3065 if ((c->modrm >> 6) == 3)
3066 opcode = g_mod3[goffset];
3068 opcode = g_mod012[goffset];
3070 if (opcode.flags & RMExt) {
3071 goffset = c->modrm & 7;
3072 opcode = opcode.u.group[goffset];
3075 c->d |= opcode.flags;
3078 if (c->d & Prefix) {
3079 if (c->rep_prefix && op_prefix)
3080 return X86EMUL_UNHANDLEABLE;
3081 simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
3082 switch (simd_prefix) {
3083 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3084 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3085 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3086 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3088 c->d |= opcode.flags;
3091 c->execute = opcode.u.execute;
3092 c->check_perm = opcode.check_perm;
3093 c->intercept = opcode.intercept;
3096 if (c->d == 0 || (c->d & Undefined))
3099 if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3102 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
3105 if (c->d & Op3264) {
3106 if (mode == X86EMUL_MODE_PROT64)
3115 /* ModRM and SIB bytes. */
3117 rc = decode_modrm(ctxt, ops, &memop);
3118 if (!c->has_seg_override)
3119 set_seg_override(c, c->modrm_seg);
3120 } else if (c->d & MemAbs)
3121 rc = decode_abs(ctxt, ops, &memop);
3122 if (rc != X86EMUL_CONTINUE)
3125 if (!c->has_seg_override)
3126 set_seg_override(c, VCPU_SREG_DS);
3128 memop.addr.mem.seg = seg_override(ctxt, ops, c);
3130 if (memop.type == OP_MEM && c->ad_bytes != 8)
3131 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3133 if (memop.type == OP_MEM && c->rip_relative)
3134 memop.addr.mem.ea += c->eip;
3137 * Decode and fetch the source operand: register, memory
3140 switch (c->d & SrcMask) {
3144 decode_register_operand(ctxt, &c->src, c, 0);
3153 memop.bytes = (c->d & ByteOp) ? 1 :
3159 rc = decode_imm(ctxt, &c->src, 2, false);
3162 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
3165 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
3168 rc = decode_imm(ctxt, &c->src, 1, true);
3171 rc = decode_imm(ctxt, &c->src, 1, false);
3174 c->src.type = OP_REG;
3175 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3176 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
3177 fetch_register_operand(&c->src);
3184 c->src.type = OP_MEM;
3185 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3186 c->src.addr.mem.ea =
3187 register_address(c, c->regs[VCPU_REGS_RSI]);
3188 c->src.addr.mem.seg = seg_override(ctxt, ops, c);
3192 c->src.type = OP_IMM;
3193 c->src.addr.mem.ea = c->eip;
3194 c->src.bytes = c->op_bytes + 2;
3195 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
3198 memop.bytes = c->op_bytes + 2;
3203 if (rc != X86EMUL_CONTINUE)
3207 * Decode and fetch the second source operand: register, memory
3210 switch (c->d & Src2Mask) {
3215 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
3218 rc = decode_imm(ctxt, &c->src2, 1, true);
3225 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
3229 if (rc != X86EMUL_CONTINUE)
3232 /* Decode and fetch the destination operand: register or memory. */
3233 switch (c->d & DstMask) {
3235 decode_register_operand(ctxt, &c->dst, c,
3236 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
3239 c->dst.type = OP_IMM;
3240 c->dst.addr.mem.ea = c->eip;
3242 c->dst.val = insn_fetch(u8, 1, c->eip);
3247 if ((c->d & DstMask) == DstMem64)
3250 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3252 fetch_bit_operand(c);
3253 c->dst.orig_val = c->dst.val;
3256 c->dst.type = OP_REG;
3257 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3258 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
3259 fetch_register_operand(&c->dst);
3260 c->dst.orig_val = c->dst.val;
3263 c->dst.type = OP_MEM;
3264 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3265 c->dst.addr.mem.ea =
3266 register_address(c, c->regs[VCPU_REGS_RDI]);
3267 c->dst.addr.mem.seg = VCPU_SREG_ES;
3271 /* Special instructions do their own operand decoding. */
3273 c->dst.type = OP_NONE; /* Disable writeback. */
3278 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3281 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3283 struct decode_cache *c = &ctxt->decode;
3285 /* The second termination condition applies only to REPE
3286 * and REPNE. If the repeat string operation prefix is
3287 * REPE/REPZ or REPNE/REPNZ (cmps/scas), test the
3288 * corresponding termination condition:
3289 * - if REPE/REPZ and ZF = 0 then done
3290 * - if REPNE/REPNZ and ZF = 1 then done
3292 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3293 (c->b == 0xae) || (c->b == 0xaf))
3294 && (((c->rep_prefix == REPE_PREFIX) &&
3295 ((ctxt->eflags & EFLG_ZF) == 0))
3296 || ((c->rep_prefix == REPNE_PREFIX) &&
3297 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3304 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3306 struct x86_emulate_ops *ops = ctxt->ops;
3308 struct decode_cache *c = &ctxt->decode;
3309 int rc = X86EMUL_CONTINUE;
3310 int saved_dst_type = c->dst.type;
3311 int irq; /* Used for int 3, int, and into */
3313 ctxt->decode.mem_read.pos = 0;
3315 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3316 rc = emulate_ud(ctxt);
3320 /* LOCK prefix is allowed only with some instructions */
3321 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3322 rc = emulate_ud(ctxt);
3326 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3327 rc = emulate_ud(ctxt);
3332 && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
3333 || !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
3334 rc = emulate_ud(ctxt);
3338 if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
3339 rc = emulate_nm(ctxt);
3343 if (unlikely(ctxt->guest_mode) && c->intercept) {
3344 rc = emulator_check_intercept(ctxt, c->intercept,
3345 X86_ICPT_PRE_EXCEPT);
3346 if (rc != X86EMUL_CONTINUE)
3350 /* Privileged instructions can be executed only at CPL 0 */
3351 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
3352 rc = emulate_gp(ctxt, 0);
3356 /* Instruction can only be executed in protected mode */
3357 if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3358 rc = emulate_ud(ctxt);
3362 /* Do instruction specific permission checks */
3363 if (c->check_perm) {
3364 rc = c->check_perm(ctxt);
3365 if (rc != X86EMUL_CONTINUE)
3369 if (unlikely(ctxt->guest_mode) && c->intercept) {
3370 rc = emulator_check_intercept(ctxt, c->intercept,
3371 X86_ICPT_POST_EXCEPT);
3372 if (rc != X86EMUL_CONTINUE)
3376 if (c->rep_prefix && (c->d & String)) {
3377 /* All REP prefixes have the same first termination condition */
3378 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3384 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3385 rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
3386 c->src.valptr, c->src.bytes);
3387 if (rc != X86EMUL_CONTINUE)
3389 c->src.orig_val64 = c->src.val64;
3392 if (c->src2.type == OP_MEM) {
3393 rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
3394 &c->src2.val, c->src2.bytes);
3395 if (rc != X86EMUL_CONTINUE)
3399 if ((c->d & DstMask) == ImplicitOps)
3403 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3404 /* optimisation - avoid slow emulated read if Mov */
3405 rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
3406 &c->dst.val, c->dst.bytes);
3407 if (rc != X86EMUL_CONTINUE)
3410 c->dst.orig_val = c->dst.val;
3414 if (unlikely(ctxt->guest_mode) && c->intercept) {
3415 rc = emulator_check_intercept(ctxt, c->intercept,
3416 X86_ICPT_POST_MEMACCESS);
3417 if (rc != X86EMUL_CONTINUE)
3422 rc = c->execute(ctxt);
3423 if (rc != X86EMUL_CONTINUE)
3434 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3436 case 0x06: /* push es */
3437 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3439 case 0x07: /* pop es */
3440 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3444 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3446 case 0x0e: /* push cs */
3447 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3451 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3453 case 0x16: /* push ss */
3454 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3456 case 0x17: /* pop ss */
3457 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3461 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3463 case 0x1e: /* push ds */
3464 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3466 case 0x1f: /* pop ds */
3467 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3471 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3475 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3479 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3483 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3485 case 0x40 ... 0x47: /* inc r16/r32 */
3486 emulate_1op("inc", c->dst, ctxt->eflags);
3488 case 0x48 ... 0x4f: /* dec r16/r32 */
3489 emulate_1op("dec", c->dst, ctxt->eflags);
3491 case 0x58 ... 0x5f: /* pop reg */
3493 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3495 case 0x60: /* pusha */
3496 rc = emulate_pusha(ctxt, ops);
3498 case 0x61: /* popa */
3499 rc = emulate_popa(ctxt, ops);
3501 case 0x63: /* movsxd */
3502 if (ctxt->mode != X86EMUL_MODE_PROT64)
3503 goto cannot_emulate;
3504 c->dst.val = (s32) c->src.val;
3506 case 0x6c: /* insb */
3507 case 0x6d: /* insw/insd */
3508 c->src.val = c->regs[VCPU_REGS_RDX];
3510 case 0x6e: /* outsb */
3511 case 0x6f: /* outsw/outsd */
3512 c->dst.val = c->regs[VCPU_REGS_RDX];
3515 case 0x70 ... 0x7f: /* jcc (short) */
3516 if (test_cc(c->b, ctxt->eflags))
3517 jmp_rel(c, c->src.val);
3519 case 0x80 ... 0x83: /* Grp1 */
3520 switch (c->modrm_reg) {
3541 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3543 case 0x86 ... 0x87: /* xchg */
3545 /* Write back the register source. */
3546 c->src.val = c->dst.val;
3547 write_register_operand(&c->src);
3549 * Write back the memory destination with implicit LOCK prefix.
3552 c->dst.val = c->src.orig_val;
3555 case 0x8c: /* mov r/m, sreg */
3556 if (c->modrm_reg > VCPU_SREG_GS) {
3557 rc = emulate_ud(ctxt);
3560 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3562 case 0x8d: /* lea r16/r32, m */
3563 c->dst.val = c->src.addr.mem.ea;
3565 case 0x8e: { /* mov seg, r/m16 */
3570 if (c->modrm_reg == VCPU_SREG_CS ||
3571 c->modrm_reg > VCPU_SREG_GS) {
3572 rc = emulate_ud(ctxt);
3576 if (c->modrm_reg == VCPU_SREG_SS)
3577 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3579 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3581 c->dst.type = OP_NONE; /* Disable writeback. */
3584 case 0x8f: /* pop (sole member of Grp1a) */
3585 rc = emulate_grp1a(ctxt, ops);
3587 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3588 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3591 case 0x98: /* cbw/cwde/cdqe */
3592 switch (c->op_bytes) {
3593 case 2: c->dst.val = (s8)c->dst.val; break;
3594 case 4: c->dst.val = (s16)c->dst.val; break;
3595 case 8: c->dst.val = (s32)c->dst.val; break;
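/*
 * The operand size picks the widening: 2 sign-extends AL into AX
 * (cbw), 4 AX into EAX (cwde), 8 EAX into RAX (cdqe).
 */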
3598 case 0x9c: /* pushf */
3599 c->src.val = (unsigned long) ctxt->eflags;
3600 emulate_push(ctxt, ops);
3602 case 0x9d: /* popf */
3603 c->dst.type = OP_REG;
3604 c->dst.addr.reg = &ctxt->eflags;
3605 c->dst.bytes = c->op_bytes;
3606 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3608 case 0xa6 ... 0xa7: /* cmps */
3609 c->dst.type = OP_NONE; /* Disable writeback. */
3611 case 0xa8 ... 0xa9: /* test ax, imm */
3613 case 0xae ... 0xaf: /* scas */
3618 case 0xc3: /* ret */
3619 c->dst.type = OP_REG;
3620 c->dst.addr.reg = &c->eip;
3621 c->dst.bytes = c->op_bytes;
3622 goto pop_instruction;
3623 case 0xc4: /* les */
3624 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3626 case 0xc5: /* lds */
3627 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3629 case 0xcb: /* ret far */
3630 rc = emulate_ret_far(ctxt, ops);
3632 case 0xcc: /* int3 */
3635 case 0xcd: /* int n */
3638 rc = emulate_int(ctxt, ops, irq);
3640 case 0xce: /* into */
3641 if (ctxt->eflags & EFLG_OF) {
3646 case 0xcf: /* iret */
3647 rc = emulate_iret(ctxt, ops);
3649 case 0xd0 ... 0xd1: /* Grp2 */
3652 case 0xd2 ... 0xd3: /* Grp2 */
3653 c->src.val = c->regs[VCPU_REGS_RCX];
3656 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3657 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3658 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3659 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3660 jmp_rel(c, c->src.val);
3662 case 0xe3: /* jcxz/jecxz/jrcxz */
3663 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3664 jmp_rel(c, c->src.val);
3666 case 0xe4: /* inb */
3669 case 0xe6: /* outb */
3670 case 0xe7: /* out */
3672 case 0xe8: /* call (near) */ {
3673 long int rel = c->src.val;
3674 c->src.val = (unsigned long) c->eip;
3676 emulate_push(ctxt, ops);
3679 case 0xe9: /* jmp rel */
3681 case 0xea: { /* jmp far */
3684 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3686 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3690 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3694 jmp: /* jmp rel short */
3695 jmp_rel(c, c->src.val);
3696 c->dst.type = OP_NONE; /* Disable writeback. */
3698 case 0xec: /* in al,dx */
3699 case 0xed: /* in (e/r)ax,dx */
3700 c->src.val = c->regs[VCPU_REGS_RDX];
3702 c->dst.bytes = min(c->dst.bytes, 4u);
3703 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3704 rc = emulate_gp(ctxt, 0);
3707 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3709 goto done; /* IO is needed */
3711 case 0xee: /* out dx,al */
3712 case 0xef: /* out dx,(e/r)ax */
3713 c->dst.val = c->regs[VCPU_REGS_RDX];
3715 c->src.bytes = min(c->src.bytes, 4u);
3716 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3718 rc = emulate_gp(ctxt, 0);
3721 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3722 &c->src.val, 1, ctxt->vcpu);
3723 c->dst.type = OP_NONE; /* Disable writeback. */
3725 case 0xf4: /* hlt */
3726 ctxt->vcpu->arch.halt_request = 1;
3728 case 0xf5: /* cmc */
3729 /* complement the carry flag in eflags */
3730 ctxt->eflags ^= EFLG_CF;
3732 case 0xf6 ... 0xf7: /* Grp3 */
3733 rc = emulate_grp3(ctxt, ops);
3735 case 0xf8: /* clc */
3736 ctxt->eflags &= ~EFLG_CF;
3738 case 0xf9: /* stc */
3739 ctxt->eflags |= EFLG_CF;
3741 case 0xfa: /* cli */
3742 if (emulator_bad_iopl(ctxt, ops)) {
3743 rc = emulate_gp(ctxt, 0);
3746 ctxt->eflags &= ~X86_EFLAGS_IF;
3748 case 0xfb: /* sti */
3749 if (emulator_bad_iopl(ctxt, ops)) {
3750 rc = emulate_gp(ctxt, 0);
3753 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3754 ctxt->eflags |= X86_EFLAGS_IF;
3757 case 0xfc: /* cld */
3758 ctxt->eflags &= ~EFLG_DF;
3760 case 0xfd: /* std */
3761 ctxt->eflags |= EFLG_DF;
3763 case 0xfe: /* Grp4 */
3765 rc = emulate_grp45(ctxt, ops);
3767 case 0xff: /* Grp5 */
3768 if (c->modrm_reg == 5)
3772 goto cannot_emulate;
3775 if (rc != X86EMUL_CONTINUE)
3779 rc = writeback(ctxt, ops);
3780 if (rc != X86EMUL_CONTINUE)
3784 * restore dst type in case the decode will be reused
3785 * (happens for string instructions)
3787 c->dst.type = saved_dst_type;
3789 if ((c->d & SrcMask) == SrcSI)
3790 string_addr_inc(ctxt, seg_override(ctxt, ops, c),
3791 VCPU_REGS_RSI, &c->src);
3793 if ((c->d & DstMask) == DstDI)
3794 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3797 if (c->rep_prefix && (c->d & String)) {
3798 struct read_cache *r = &ctxt->decode.io_read;
3799 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3801 if (!string_insn_completed(ctxt)) {
3803 * Re-enter the guest when the pio read-ahead buffer is empty
3804 * or, if it is not used, after every 1024 iterations.
3806 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3807 (r->end == 0 || r->end != r->pos)) {
3809 * Reset the read cache.  Usually this happens before
3810 * decode, but since the instruction is restarted
3811 * we have to do it here.
3813 ctxt->decode.mem_read.end = 0;
3814 return EMULATION_RESTART;
3816 goto done; /* skip rip writeback */
3823 if (rc == X86EMUL_PROPAGATE_FAULT)
3824 ctxt->have_exception = true;
3825 if (rc == X86EMUL_INTERCEPTED)
3826 return EMULATION_INTERCEPTED;
3828 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3832 case 0x01: /* lgdt, lidt, lmsw */
3833 switch (c->modrm_reg) {
3835 unsigned long address;
3837 case 0: /* vmcall */
3838 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3839 goto cannot_emulate;
3841 rc = kvm_fix_hypercall(ctxt->vcpu);
3842 if (rc != X86EMUL_CONTINUE)
3845 /* Let the processor re-execute the fixed hypercall */
3847 /* Disable writeback. */
3848 c->dst.type = OP_NONE;
3851 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3852 &size, &address, c->op_bytes);
3853 if (rc != X86EMUL_CONTINUE)
3855 realmode_lgdt(ctxt->vcpu, size, address);
3856 /* Disable writeback. */
3857 c->dst.type = OP_NONE;
3859 case 3: /* lidt/vmmcall */
3860 if (c->modrm_mod == 3) {
3861 switch (c->modrm_rm) {
3863 rc = kvm_fix_hypercall(ctxt->vcpu);
3866 goto cannot_emulate;
3869 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3872 if (rc != X86EMUL_CONTINUE)
3874 realmode_lidt(ctxt->vcpu, size, address);
3876 /* Disable writeback. */
3877 c->dst.type = OP_NONE;
3881 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3884 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3885 (c->src.val & 0x0f), ctxt->vcpu);
3886 c->dst.type = OP_NONE;
3888 case 5: /* not defined */
3890 rc = X86EMUL_PROPAGATE_FAULT;
3893 emulate_invlpg(ctxt->vcpu,
3894 linear(ctxt, c->src.addr.mem));
3895 /* Disable writeback. */
3896 c->dst.type = OP_NONE;
3899 goto cannot_emulate;
3902 case 0x05: /* syscall */
3903 rc = emulate_syscall(ctxt, ops);
3906 emulate_clts(ctxt->vcpu);
3908 case 0x09: /* wbinvd */
3909 kvm_emulate_wbinvd(ctxt->vcpu);
3911 case 0x08: /* invd */
3912 case 0x0d: /* GrpP (prefetch) */
3913 case 0x18: /* Grp16 (prefetch/nop) */
3915 case 0x20: /* mov cr, reg */
3916 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3918 case 0x21: /* mov from dr to reg */
3919 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3921 case 0x22: /* mov reg, cr */
3922 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3923 emulate_gp(ctxt, 0);
3924 rc = X86EMUL_PROPAGATE_FAULT;
3927 c->dst.type = OP_NONE;
3929 case 0x23: /* mov from reg to dr */
3930 if (ops->set_dr(c->modrm_reg, c->src.val &
3931 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3932 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3933 /* #UD condition is already handled by the code above */
3934 emulate_gp(ctxt, 0);
3935 rc = X86EMUL_PROPAGATE_FAULT;
3939 c->dst.type = OP_NONE; /* no writeback */
3943 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3944 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3945 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3946 emulate_gp(ctxt, 0);
3947 rc = X86EMUL_PROPAGATE_FAULT;
3950 rc = X86EMUL_CONTINUE;
3954 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3955 emulate_gp(ctxt, 0);
3956 rc = X86EMUL_PROPAGATE_FAULT;
3959 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3960 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3962 rc = X86EMUL_CONTINUE;
3964 case 0x34: /* sysenter */
3965 rc = emulate_sysenter(ctxt, ops);
3967 case 0x35: /* sysexit */
3968 rc = emulate_sysexit(ctxt, ops);
3970 case 0x40 ... 0x4f: /* cmov */
3971 c->dst.val = c->dst.orig_val = c->src.val;
3972 if (!test_cc(c->b, ctxt->eflags))
3973 c->dst.type = OP_NONE; /* no writeback */
3975 case 0x80 ... 0x8f: /* jnz rel, etc. */
3976 if (test_cc(c->b, ctxt->eflags))
3977 jmp_rel(c, c->src.val);
3979 case 0x90 ... 0x9f: /* setcc r/m8 */
3980 c->dst.val = test_cc(c->b, ctxt->eflags);
3982 case 0xa0: /* push fs */
3983 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3985 case 0xa1: /* pop fs */
3986 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3990 c->dst.type = OP_NONE;
3991 /* only subword offset */
3992 c->src.val &= (c->dst.bytes << 3) - 1;
3993 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3995 case 0xa4: /* shld imm8, r, r/m */
3996 case 0xa5: /* shld cl, r, r/m */
3997 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3999 case 0xa8: /* push gs */
4000 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
4002 case 0xa9: /* pop gs */
4003 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
4007 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
4009 case 0xac: /* shrd imm8, r, r/m */
4010 case 0xad: /* shrd cl, r, r/m */
4011 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
4013 case 0xae: /* clflush */
4015 case 0xb0 ... 0xb1: /* cmpxchg */
4017 * Save real source value, then compare EAX against the destination.
4020 c->src.orig_val = c->src.val;
4021 c->src.val = c->regs[VCPU_REGS_RAX];
4022 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
4023 if (ctxt->eflags & EFLG_ZF) {
4024 /* Success: write back to memory. */
4025 c->dst.val = c->src.orig_val;
4027 /* Failure: write the value we saw to EAX. */
4028 c->dst.type = OP_REG;
4029 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
4032 case 0xb2: /* lss */
4033 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
4037 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
4039 case 0xb4: /* lfs */
4040 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
4042 case 0xb5: /* lgs */
4043 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
4045 case 0xb6 ... 0xb7: /* movzx */
4046 c->dst.bytes = c->op_bytes;
4047 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
4050 case 0xba: /* Grp8 */
4051 switch (c->modrm_reg & 3) {
4064 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
4066 case 0xbc: { /* bsf */
4068 __asm__ ("bsf %2, %0; setz %1"
4069 : "=r"(c->dst.val), "=q"(zf)
4071 ctxt->eflags &= ~X86_EFLAGS_ZF;
4073 ctxt->eflags |= X86_EFLAGS_ZF;
4074 c->dst.type = OP_NONE; /* Disable writeback. */
4078 case 0xbd: { /* bsr */
4080 __asm__ ("bsr %2, %0; setz %1"
4081 : "=r"(c->dst.val), "=q"(zf)
4083 ctxt->eflags &= ~X86_EFLAGS_ZF;
4085 ctxt->eflags |= X86_EFLAGS_ZF;
4086 c->dst.type = OP_NONE; /* Disable writeback. */
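/*
 * For a zero source, bsf/bsr set ZF and leave the destination
 * architecturally undefined, so writeback is suppressed and only the
 * ZF update survives.
 */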
4090 case 0xbe ... 0xbf: /* movsx */
4091 c->dst.bytes = c->op_bytes;
4092 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
4095 case 0xc0 ... 0xc1: /* xadd */
4096 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
4097 /* Write back the register source. */
4098 c->src.val = c->dst.orig_val;
4099 write_register_operand(&c->src);
4101 case 0xc3: /* movnti */
4102 c->dst.bytes = c->op_bytes;
4103 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
4106 case 0xc7: /* Grp9 (cmpxchg8b) */
4107 rc = emulate_grp9(ctxt, ops);
4110 goto cannot_emulate;
4113 if (rc != X86EMUL_CONTINUE)