/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits            5  /* Width of operand field */
#define OpMask            ((1ull << OpBits) - 1)
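/*
 * Illustrative note (not part of the original source): each Op* value
 * above occupies one OpBits-wide field inside the per-opcode u64 flags
 * word, at the Dst/Src/Src2 shifts defined below.  Pulling a field back
 * out is a plain shift-and-mask, e.g.:
 *
 *	u64 dst = (ctxt->d >> DstShift)  & OpMask;	(e.g. OpReg)
 *	u64 src = (ctxt->d >> SrcShift)  & OpMask;	(e.g. OpMem)
 *	u64 sr2 = (ctxt->d >> Src2Shift) & OpMask;
 */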
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define InstrDual   (6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)	/* Different instruction for 32/64 bit */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)	/* instruction is invalid in 64-bit mode */
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
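/*
 * Illustrative note (not part of the original source): the X* macros
 * simply repeat their argument list, e.g.
 *
 *	X4(em_push)
 *
 * expands to "em_push, em_push, em_push, em_push", so a run of sixteen
 * identical opcode-table entries can be written as a single X16(...).
 */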
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
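/*
 * Illustrative sketch (an assumption based on the comment above, not the
 * original implementation): because the byte/word/long/quad variants
 * emitted by the FASTOP* macros sit FASTOP_SIZE bytes apart, a caller can
 * locate the variant for a given operand size arithmetically instead of
 * via a jump table:
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * __ffs maps bytes 1/2/4/8 to slots 0/1/2/3.
 */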
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"
#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET
#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END
#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END
#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
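/*
 * Illustrative usage (hypothetical call site, not from the original
 * file): run one faultable instruction and turn a runtime fault into an
 * error code instead of an oops:
 *
 *	rc = asm_safe("fwait");
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 *
 * Extra asm operands ride in through inoutclob, appended after the
 * _fault output, e.g. asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)).
 */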
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1: *(u8 *)reg = (u8)val; break;
	case 2: *(u16 *)reg = (u16)val; break;
	case 4: *reg = (u32)val; break;	/* 64b: zero-extend */
	case 8: *reg = val; break;
	}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}
static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
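/*
 * Worked example (illustrative): with the granularity bit set (g=1), a
 * raw 20-bit limit of 0xfffff scales to (0xfffff << 12) | 0xfff ==
 * 0xffffffff, i.e. a full 4GiB flat segment.
 */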
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size); /* == 15 - cur_size, as cur_size <= 15 */
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
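/*
 * Illustrative usage (hypothetical call site, not from the original
 * file): the caller must provide a local "rc" and a "done:" label,
 * since the macros bail out with "goto done" on a fetch failure:
 *
 *	ctxt->b = insn_fetch(u8, ctxt);		(opcode byte)
 *	ctxt->modrm = insn_fetch(u8, ctxt);	(ModRM byte)
 */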
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH, CH, DH, BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1: op->val = *(u8 *)op->addr.reg; break;
	case 2: op->val = *(u16 *)op->addr.reg; break;
	case 4: op->val = *(u32 *)op->addr.reg; break;
	case 8: op->val = *(u64 *)op->addr.reg; break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0: modrm_ea += bx + si; break;
		case 1: modrm_ea += bx + di; break;
		case 2: modrm_ea += bp + si; break;
		case 3: modrm_ea += bp + di; break;
		case 4: modrm_ea += si; break;
		case 5: modrm_ea += di; break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7: modrm_ea += bx; break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break;
	case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break;
	case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
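/*
 * Worked example (illustrative): BT word [mem], 100 on a 2-byte operand:
 * mask = ~15, sv = 100 & ~15 = 96, so the effective address moves by
 * 96 >> 3 = 12 bytes and the in-word bit offset becomes 100 & 15 = 4,
 * i.e. bit 4 of the word at mem+12.
 */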
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}
/* allowed just for 8 byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}
/* allowed just for 8 byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can, but
	 * it is simpler to allow it for all of them here).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}
static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return edx & bit(X86_FEATURE_LM);
}
#define GET_SMSTATE(type, smbase, offset)				  \
	({								  \
	 type __val;							  \
	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
				      sizeof(__val));			  \
	 if (r != X86EMUL_CONTINUE)					  \
		 return X86EMUL_UNHANDLEABLE;				  \
	 __val;								  \
	})
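/*
 * Illustrative usage (hypothetical, mirroring the real call sites
 * below): GET_SMSTATE is a statement expression that yields the value
 * read, and bails out of the *enclosing* function on failure, so it may
 * only be used inside helpers that return an int status:
 *
 *	cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
 */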
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}
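/*
 * Worked example (illustrative): flags == 0x00c09300 unpacks to
 * g=1, d=1, l=0, avl=0, p=1, dpl=0, s=1, type=3 -- a present,
 * page-granular, 32-bit, read/write accessed data segment.
 */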
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}
2428 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2430 struct desc_struct desc;
2436 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2437 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2438 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2439 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2441 for (i = 0; i < 8; i++)
2442 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2444 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2445 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2446 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2447 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2449 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2450 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2451 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2452 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2453 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2455 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2456 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2457 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2458 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2459 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2461 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2462 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2463 ctxt->ops->set_gdt(ctxt, &dt);
2465 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2466 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2467 ctxt->ops->set_idt(ctxt, &dt);
2469 for (i = 0; i < 6; i++) {
2470 int r = rsm_load_seg_32(ctxt, smbase, i);
2471 if (r != X86EMUL_CONTINUE)
2472 return r;
2475 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2477 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2479 return rsm_enter_protected_mode(ctxt, cr0, cr4);
2482 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2484 struct desc_struct desc;
2485 struct desc_ptr dt;
2486 u64 val, cr0, cr4;
2487 u32 base3;
2488 u16 selector;
2489 int i, r;
2491 for (i = 0; i < 16; i++)
2492 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2494 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2495 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2497 val = GET_SMSTATE(u32, smbase, 0x7f68);
2498 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2499 val = GET_SMSTATE(u32, smbase, 0x7f60);
2500 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2502 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2503 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2504 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2505 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2506 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2507 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2509 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2510 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2511 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2512 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2513 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2514 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2516 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2517 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2518 ctxt->ops->set_idt(ctxt, &dt);
2520 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2521 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2522 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2523 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2524 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2525 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2527 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2528 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2529 ctxt->ops->set_gdt(ctxt, &dt);
2531 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2532 if (r != X86EMUL_CONTINUE)
2533 return r;
2535 for (i = 0; i < 6; i++) {
2536 r = rsm_load_seg_64(ctxt, smbase, i);
2537 if (r != X86EMUL_CONTINUE)
2538 return r;
2541 return X86EMUL_CONTINUE;
2544 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2546 unsigned long cr0, cr4, efer;
2547 u64 smbase;
2548 int ret;
2550 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2551 return emulate_ud(ctxt);
2554 * Get back to real mode, to prepare a safe state in which to load
2555 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2556 * supports long mode.
2558 cr4 = ctxt->ops->get_cr(ctxt, 4);
2559 if (emulator_has_longmode(ctxt)) {
2560 struct desc_struct cs_desc;
2562 /* Zero CR4.PCIDE before CR0.PG. */
2563 if (cr4 & X86_CR4_PCIDE) {
2564 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2565 cr4 &= ~X86_CR4_PCIDE;
2568 /* A 32-bit code segment is required to clear EFER.LMA. */
2569 memset(&cs_desc, 0, sizeof(cs_desc));
2570 cs_desc.type = 0xb;
2571 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2572 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2575 /* For the 64-bit case, this will clear EFER.LMA. */
2576 cr0 = ctxt->ops->get_cr(ctxt, 0);
2577 if (cr0 & X86_CR0_PE)
2578 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2580 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2581 if (cr4 & X86_CR4_PAE)
2582 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2584 /* And finally go back to 32-bit mode. */
2585 efer = 0;
2586 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2588 smbase = ctxt->ops->get_smbase(ctxt);
2589 if (emulator_has_longmode(ctxt))
2590 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2591 else
2592 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2594 if (ret != X86EMUL_CONTINUE) {
2595 /* FIXME: should triple fault */
2596 return X86EMUL_UNHANDLEABLE;
2599 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2600 ctxt->ops->set_nmi_mask(ctxt, false);
2602 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2603 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2604 return X86EMUL_CONTINUE;
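/*
 * Added note: rsm_load_state_*() are handed smbase + 0x8000 and then use
 * offsets in the 0x7e00..0x7fff range, so every state-save access lands in
 * SMBASE + 0xfe00 .. SMBASE + 0xffff, the architectural SMRAM state-save
 * area.
 */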
2607 static void
2608 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2609 struct desc_struct *cs, struct desc_struct *ss)
2611 cs->l = 0; /* will be adjusted later */
2612 set_desc_base(cs, 0); /* flat segment */
2613 cs->g = 1; /* 4kb granularity */
2614 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2615 cs->type = 0x0b; /* Read, Execute, Accessed */
2616 cs->s = 1;
2617 cs->dpl = 0; /* will be adjusted later */
2618 cs->p = 1;
2619 cs->d = 1;
2620 cs->avl = 0;
2622 set_desc_base(ss, 0); /* flat segment */
2623 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2624 ss->g = 1; /* 4kb granularity */
2625 ss->s = 1;
2626 ss->type = 0x03; /* Read/Write, Accessed */
2627 ss->d = 1; /* 32bit stack segment */
2628 ss->dpl = 0;
2629 ss->p = 1;
2630 ss->l = 0;
2631 ss->avl = 0;
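/*
 * Worked example (added note): packed into a legacy GDT entry, the CS set
 * up above (base 0, limit 0xfffff, g=1, d=1, p=1, s=1, dpl=0, type 0xb) is
 * the classic flat descriptor 0x00cf9b000000ffff; the SS (type 0x03) packs
 * to 0x00cf93000000ffff.
 */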
2634 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2636 u32 eax, ebx, ecx, edx;
2638 eax = ecx = 0;
2639 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2640 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2641 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2642 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2645 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2647 const struct x86_emulate_ops *ops = ctxt->ops;
2648 u32 eax, ebx, ecx, edx;
2651 * syscall should always be enabled in long mode, so the check only
2652 * needs to be vendor specific (via CPUID) when other modes are active...
2654 if (ctxt->mode == X86EMUL_MODE_PROT64)
2655 return true;
2657 eax = 0x00000000;
2658 ecx = 0x00000000;
2659 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2661 * Intel ("GenuineIntel")
2662 * remark: Intel CPUs only support "syscall" in 64-bit long mode. A
2663 * 64-bit guest running a 32-bit compat application will therefore
2664 * #UD. While this behaviour could be fixed up (by emulating) to give
2665 * the AMD response, real AMD CPUs cannot be made to behave like
2666 * Intel ones, so Intel's stricter behaviour is emulated faithfully.
2668 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2669 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2670 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2671 return true;
2673 /* AMD ("AuthenticAMD") */
2674 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2675 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2676 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2677 return true;
2679 /* AMD ("AMDisbetter!") */
2680 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2681 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2682 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2683 return true;
2685 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2686 return false;
2689 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2691 const struct x86_emulate_ops *ops = ctxt->ops;
2692 struct desc_struct cs, ss;
2693 u64 msr_data;
2694 u16 cs_sel, ss_sel;
2695 u64 efer = 0;
2697 /* syscall is not available in real mode */
2698 if (ctxt->mode == X86EMUL_MODE_REAL ||
2699 ctxt->mode == X86EMUL_MODE_VM86)
2700 return emulate_ud(ctxt);
2702 if (!(em_syscall_is_enabled(ctxt)))
2703 return emulate_ud(ctxt);
2705 ops->get_msr(ctxt, MSR_EFER, &efer);
2706 setup_syscalls_segments(ctxt, &cs, &ss);
2708 if (!(efer & EFER_SCE))
2709 return emulate_ud(ctxt);
2711 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2712 msr_data >>= 32;
2713 cs_sel = (u16)(msr_data & 0xfffc);
2714 ss_sel = (u16)(msr_data + 8);
2716 if (efer & EFER_LMA) {
2717 cs.d = 0;
2718 cs.l = 1;
2719 }
2720 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2721 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2723 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2724 if (efer & EFER_LMA) {
2725 #ifdef CONFIG_X86_64
2726 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2728 ops->get_msr(ctxt,
2729 ctxt->mode == X86EMUL_MODE_PROT64 ?
2730 MSR_LSTAR : MSR_CSTAR, &msr_data);
2731 ctxt->_eip = msr_data;
2733 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2734 ctxt->eflags &= ~msr_data;
2735 ctxt->eflags |= X86_EFLAGS_FIXED;
2736 #endif
2737 } else {
2738 /* legacy mode */
2739 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2740 ctxt->_eip = (u32)msr_data;
2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2745 return X86EMUL_CONTINUE;
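/*
 * Worked example (added note): MSR_STAR[47:32] holds the SYSCALL CS/SS base
 * selector. With STAR = 0x0000001000000000, msr_data >>= 32 yields 0x10, so
 * cs_sel = 0x10 and ss_sel = 0x18 - SS always sits one 8-byte GDT entry
 * above CS.
 */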
2748 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2750 const struct x86_emulate_ops *ops = ctxt->ops;
2751 struct desc_struct cs, ss;
2752 u64 msr_data;
2753 u16 cs_sel, ss_sel;
2754 u64 efer = 0;
2756 ops->get_msr(ctxt, MSR_EFER, &efer);
2757 /* inject #GP if in real mode */
2758 if (ctxt->mode == X86EMUL_MODE_REAL)
2759 return emulate_gp(ctxt, 0);
2762 * Not recognized on AMD in compat mode (but is recognized in legacy
2763 * mode)
2765 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2766 && !vendor_intel(ctxt))
2767 return emulate_ud(ctxt);
2769 /* sysenter/sysexit have not been tested in 64bit mode. */
2770 if (ctxt->mode == X86EMUL_MODE_PROT64)
2771 return X86EMUL_UNHANDLEABLE;
2773 setup_syscalls_segments(ctxt, &cs, &ss);
2775 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2776 if ((msr_data & 0xfffc) == 0x0)
2777 return emulate_gp(ctxt, 0);
2779 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2780 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2781 ss_sel = cs_sel + 8;
2782 if (efer & EFER_LMA) {
2783 cs.d = 0;
2784 cs.l = 1;
2785 }
2787 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2788 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2790 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2791 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2793 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2794 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2795 (u32)msr_data;
2797 return X86EMUL_CONTINUE;
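/*
 * Added note: SYSENTER derives everything from MSRs - CS comes from
 * IA32_SYSENTER_CS with RPL forced to 0, SS is hard-wired to CS + 8, and
 * EIP/ESP come from IA32_SYSENTER_EIP/ESP, truncated to 32 bits unless
 * EFER.LMA is set.
 */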
2800 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct cs, ss;
2804 u64 msr_data, rcx, rdx;
2805 int usermode;
2806 u16 cs_sel = 0, ss_sel = 0;
2808 /* inject #GP if in real mode or Virtual 8086 mode */
2809 if (ctxt->mode == X86EMUL_MODE_REAL ||
2810 ctxt->mode == X86EMUL_MODE_VM86)
2811 return emulate_gp(ctxt, 0);
2813 setup_syscalls_segments(ctxt, &cs, &ss);
2815 if ((ctxt->rex_prefix & 0x8) != 0x0)
2816 usermode = X86EMUL_MODE_PROT64;
2817 else
2818 usermode = X86EMUL_MODE_PROT32;
2820 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2821 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2825 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2826 switch (usermode) {
2827 case X86EMUL_MODE_PROT32:
2828 cs_sel = (u16)(msr_data + 16);
2829 if ((msr_data & 0xfffc) == 0x0)
2830 return emulate_gp(ctxt, 0);
2831 ss_sel = (u16)(msr_data + 24);
2832 rcx = (u32)rcx;
2833 rdx = (u32)rdx;
2834 break;
2835 case X86EMUL_MODE_PROT64:
2836 cs_sel = (u16)(msr_data + 32);
2837 if (msr_data == 0x0)
2838 return emulate_gp(ctxt, 0);
2839 ss_sel = cs_sel + 8;
2840 cs.d = 0;
2841 cs.l = 1;
2842 if (is_noncanonical_address(rcx) ||
2843 is_noncanonical_address(rdx))
2844 return emulate_gp(ctxt, 0);
2845 break;
2846 }
2847 cs_sel |= SEGMENT_RPL_MASK;
2848 ss_sel |= SEGMENT_RPL_MASK;
2850 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2851 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2853 ctxt->_eip = rdx;
2854 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2856 return X86EMUL_CONTINUE;
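/*
 * Worked example (added note): with IA32_SYSENTER_CS = 0x10 the selectors
 * computed above are CS = 0x20 / SS = 0x28 for a 32-bit SYSEXIT (+16/+24)
 * and CS = 0x30 / SS = 0x38 for a 64-bit one (+32, SS = CS + 8), each with
 * RPL forced to 3 by SEGMENT_RPL_MASK.
 */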
2859 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2861 int iopl;
2862 if (ctxt->mode == X86EMUL_MODE_REAL)
2863 return false;
2864 if (ctxt->mode == X86EMUL_MODE_VM86)
2865 return true;
2866 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2867 return ctxt->ops->cpl(ctxt) > iopl;
2870 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2871 u16 port, u16 len)
2873 const struct x86_emulate_ops *ops = ctxt->ops;
2874 struct desc_struct tr_seg;
2875 u32 base3;
2876 int r;
2877 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2878 unsigned mask = (1 << len) - 1;
2879 unsigned long base;
2881 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2882 if (!tr_seg.p)
2883 return false;
2884 if (desc_limit_scaled(&tr_seg) < 103)
2885 return false;
2886 base = get_desc_base(&tr_seg);
2887 #ifdef CONFIG_X86_64
2888 base |= ((u64)base3) << 32;
2889 #endif
2890 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2891 if (r != X86EMUL_CONTINUE)
2892 return false;
2893 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2894 return false;
2895 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2896 if (r != X86EMUL_CONTINUE)
2897 return false;
2898 if ((perm >> bit_idx) & mask)
2899 return false;
2901 return true;
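/*
 * Worked example (added note): for port 0x3f8 with len == 1, the code above
 * reads the u16 at base + io_bitmap_ptr + 127 (0x3f8 / 8), tests bit_idx = 0
 * (0x3f8 & 7) against mask = 0x1, and permits the access only if that bit is
 * clear in the TSS I/O permission bitmap.
 */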
2903 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2904 u16 port, u16 len)
2906 if (ctxt->perm_ok)
2907 return true;
2909 if (emulator_bad_iopl(ctxt))
2910 if (!emulator_io_port_access_allowed(ctxt, port, len))
2911 return false;
2913 ctxt->perm_ok = true;
2915 return true;
2918 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2921 * Intel CPUs mask the counter and pointers in a rather strange
2922 * manner when ECX is zero, due to REP-string optimizations.
2924 #ifdef CONFIG_X86_64
2925 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2926 return;
2928 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2930 switch (ctxt->b) {
2931 case 0xa4: /* movsb */
2932 case 0xa5: /* movsd/w */
2933 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2934 /* fall through */
2935 case 0xaa: /* stosb */
2936 case 0xab: /* stosd/w */
2937 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2938 }
2939 #endif
2942 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2943 struct tss_segment_16 *tss)
2945 tss->ip = ctxt->_eip;
2946 tss->flag = ctxt->eflags;
2947 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2948 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2949 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2950 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2951 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2952 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2953 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2954 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2956 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2957 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2958 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2959 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2960 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2963 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2964 struct tss_segment_16 *tss)
2966 int ret;
2967 u8 cpl;
2969 ctxt->_eip = tss->ip;
2970 ctxt->eflags = tss->flag | 2;
2971 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2972 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2973 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2974 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2975 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2976 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2977 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2978 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2981 * SDM says that segment selectors are loaded before segment
2982 * descriptors
2984 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2985 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2986 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2987 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2988 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2990 cpl = tss->cs & 3;
2993 * Now load the segment descriptors. If a fault happens at this stage,
2994 * it is handled in the context of the new task
2996 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2997 X86_TRANSFER_TASK_SWITCH, NULL);
2998 if (ret != X86EMUL_CONTINUE)
2999 return ret;
3000 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3001 X86_TRANSFER_TASK_SWITCH, NULL);
3002 if (ret != X86EMUL_CONTINUE)
3003 return ret;
3004 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3005 X86_TRANSFER_TASK_SWITCH, NULL);
3006 if (ret != X86EMUL_CONTINUE)
3007 return ret;
3008 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3009 X86_TRANSFER_TASK_SWITCH, NULL);
3010 if (ret != X86EMUL_CONTINUE)
3011 return ret;
3012 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3013 X86_TRANSFER_TASK_SWITCH, NULL);
3014 if (ret != X86EMUL_CONTINUE)
3015 return ret;
3017 return X86EMUL_CONTINUE;
3020 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3021 u16 tss_selector, u16 old_tss_sel,
3022 ulong old_tss_base, struct desc_struct *new_desc)
3024 const struct x86_emulate_ops *ops = ctxt->ops;
3025 struct tss_segment_16 tss_seg;
3026 int ret;
3027 u32 new_tss_base = get_desc_base(new_desc);
3029 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3030 &ctxt->exception);
3031 if (ret != X86EMUL_CONTINUE)
3032 return ret;
3034 save_state_to_tss16(ctxt, &tss_seg);
3036 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3037 &ctxt->exception);
3038 if (ret != X86EMUL_CONTINUE)
3039 return ret;
3041 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3042 &ctxt->exception);
3043 if (ret != X86EMUL_CONTINUE)
3044 return ret;
3046 if (old_tss_sel != 0xffff) {
3047 tss_seg.prev_task_link = old_tss_sel;
3049 ret = ops->write_std(ctxt, new_tss_base,
3050 &tss_seg.prev_task_link,
3051 sizeof tss_seg.prev_task_link,
3052 &ctxt->exception);
3053 if (ret != X86EMUL_CONTINUE)
3054 return ret;
3057 return load_state_from_tss16(ctxt, &tss_seg);
3060 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3061 struct tss_segment_32 *tss)
3063 /* CR3 and the LDT selector are intentionally not saved */
3064 tss->eip = ctxt->_eip;
3065 tss->eflags = ctxt->eflags;
3066 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3067 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3068 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3069 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3070 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3071 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3072 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3073 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3075 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3076 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3077 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3078 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3079 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3080 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3083 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3084 struct tss_segment_32 *tss)
3086 int ret;
3087 u8 cpl;
3089 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3090 return emulate_gp(ctxt, 0);
3091 ctxt->_eip = tss->eip;
3092 ctxt->eflags = tss->eflags | 2;
3094 /* General purpose registers */
3095 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3096 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3097 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3098 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3099 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3100 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3101 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3102 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3105 * SDM says that segment selectors are loaded before segment
3106 * descriptors. This is important because CPL checks will
3107 * use CS.RPL.
3109 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3110 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3111 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3112 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3113 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3114 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3115 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3118 * If we're switching between Protected Mode and VM86, we need to make
3119 * sure to update the mode before loading the segment descriptors so
3120 * that the selectors are interpreted correctly.
3122 if (ctxt->eflags & X86_EFLAGS_VM) {
3123 ctxt->mode = X86EMUL_MODE_VM86;
3124 cpl = 3;
3125 } else {
3126 ctxt->mode = X86EMUL_MODE_PROT32;
3127 cpl = tss->cs & 3;
3128 }
3131 * Now load the segment descriptors. If a fault happens at this stage,
3132 * it is handled in the context of the new task
3134 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3135 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3136 if (ret != X86EMUL_CONTINUE)
3137 return ret;
3138 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3139 X86_TRANSFER_TASK_SWITCH, NULL);
3140 if (ret != X86EMUL_CONTINUE)
3141 return ret;
3142 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3143 X86_TRANSFER_TASK_SWITCH, NULL);
3144 if (ret != X86EMUL_CONTINUE)
3145 return ret;
3146 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3147 X86_TRANSFER_TASK_SWITCH, NULL);
3148 if (ret != X86EMUL_CONTINUE)
3149 return ret;
3150 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3151 X86_TRANSFER_TASK_SWITCH, NULL);
3152 if (ret != X86EMUL_CONTINUE)
3153 return ret;
3154 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3155 X86_TRANSFER_TASK_SWITCH, NULL);
3156 if (ret != X86EMUL_CONTINUE)
3157 return ret;
3158 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3159 X86_TRANSFER_TASK_SWITCH, NULL);
3161 return ret;
3164 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3165 u16 tss_selector, u16 old_tss_sel,
3166 ulong old_tss_base, struct desc_struct *new_desc)
3168 const struct x86_emulate_ops *ops = ctxt->ops;
3169 struct tss_segment_32 tss_seg;
3170 int ret;
3171 u32 new_tss_base = get_desc_base(new_desc);
3172 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3173 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3175 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3176 &ctxt->exception);
3177 if (ret != X86EMUL_CONTINUE)
3178 return ret;
3180 save_state_to_tss32(ctxt, &tss_seg);
3182 /* Only GP registers and segment selectors are saved */
3183 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3184 ldt_sel_offset - eip_offset, &ctxt->exception);
3185 if (ret != X86EMUL_CONTINUE)
3186 return ret;
3188 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3189 &ctxt->exception);
3190 if (ret != X86EMUL_CONTINUE)
3191 return ret;
3193 if (old_tss_sel != 0xffff) {
3194 tss_seg.prev_task_link = old_tss_sel;
3196 ret = ops->write_std(ctxt, new_tss_base,
3197 &tss_seg.prev_task_link,
3198 sizeof tss_seg.prev_task_link,
3199 &ctxt->exception);
3200 if (ret != X86EMUL_CONTINUE)
3201 return ret;
3204 return load_state_from_tss32(ctxt, &tss_seg);
3207 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3208 u16 tss_selector, int idt_index, int reason,
3209 bool has_error_code, u32 error_code)
3211 const struct x86_emulate_ops *ops = ctxt->ops;
3212 struct desc_struct curr_tss_desc, next_tss_desc;
3213 int ret;
3214 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3215 ulong old_tss_base =
3216 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3217 u32 desc_limit;
3218 ulong desc_addr, dr7;
3220 /* FIXME: old_tss_base == ~0 ? */
3222 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3223 if (ret != X86EMUL_CONTINUE)
3224 return ret;
3225 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3226 if (ret != X86EMUL_CONTINUE)
3227 return ret;
3229 /* FIXME: check that next_tss_desc is tss */
3232 * Check privileges. The three cases are task switch caused by...
3234 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3235 * 2. Exception/IRQ/iret: No check is performed
3236 * 3. jmp/call to TSS/task-gate: No check is performed since the
3237 * hardware checks it before exiting.
3239 if (reason == TASK_SWITCH_GATE) {
3240 if (idt_index != -1) {
3241 /* Software interrupts */
3242 struct desc_struct task_gate_desc;
3243 int dpl;
3245 ret = read_interrupt_descriptor(ctxt, idt_index,
3246 &task_gate_desc);
3247 if (ret != X86EMUL_CONTINUE)
3248 return ret;
3250 dpl = task_gate_desc.dpl;
3251 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3252 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3256 desc_limit = desc_limit_scaled(&next_tss_desc);
3257 if (!next_tss_desc.p ||
3258 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3259 desc_limit < 0x2b)) {
3260 return emulate_ts(ctxt, tss_selector & 0xfffc);
3263 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3264 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3265 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3268 if (reason == TASK_SWITCH_IRET)
3269 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3271 /* Set the back link to the previous task only if the NT bit is set in
3272 EFLAGS; note that old_tss_sel is not used after this point. */
3273 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3274 old_tss_sel = 0xffff;
3276 if (next_tss_desc.type & 8)
3277 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3278 old_tss_base, &next_tss_desc);
3279 else
3280 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3281 old_tss_base, &next_tss_desc);
3282 if (ret != X86EMUL_CONTINUE)
3283 return ret;
3285 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3286 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3288 if (reason != TASK_SWITCH_IRET) {
3289 next_tss_desc.type |= (1 << 1); /* set busy flag */
3290 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3293 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3294 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3296 if (has_error_code) {
3297 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3298 ctxt->lock_prefix = 0;
3299 ctxt->src.val = (unsigned long) error_code;
3300 ret = em_push(ctxt);
3303 ops->get_dr(ctxt, 7, &dr7);
3304 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3306 return ret;
3309 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3310 u16 tss_selector, int idt_index, int reason,
3311 bool has_error_code, u32 error_code)
3313 int rc;
3315 invalidate_registers(ctxt);
3316 ctxt->_eip = ctxt->eip;
3317 ctxt->dst.type = OP_NONE;
3319 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3320 has_error_code, error_code);
3322 if (rc == X86EMUL_CONTINUE) {
3323 ctxt->eip = ctxt->_eip;
3324 writeback_registers(ctxt);
3327 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3330 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3331 struct operand *op)
3333 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3335 register_address_increment(ctxt, reg, df * op->bytes);
3336 op->addr.mem.ea = register_address(ctxt, reg);
3339 static int em_das(struct x86_emulate_ctxt *ctxt)
3341 u8 al, old_al;
3342 bool af, cf, old_cf;
3344 cf = ctxt->eflags & X86_EFLAGS_CF;
3345 al = ctxt->dst.val;
3347 old_al = al;
3348 old_cf = cf;
3349 cf = false;
3350 af = ctxt->eflags & X86_EFLAGS_AF;
3351 if ((al & 0x0f) > 9 || af) {
3352 al -= 6;
3353 cf = old_cf | (al >= 250);
3354 af = true;
3355 } else {
3356 af = false;
3357 }
3358 if (old_al > 0x99 || old_cf) {
3359 al -= 0x60;
3360 cf = true;
3361 }
3363 ctxt->dst.val = al;
3364 /* Set PF, ZF, SF */
3365 ctxt->src.type = OP_IMM;
3366 ctxt->src.val = 0;
3367 ctxt->src.bytes = 1;
3368 fastop(ctxt, em_or);
3369 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3370 if (cf)
3371 ctxt->eflags |= X86_EFLAGS_CF;
3372 if (af)
3373 ctxt->eflags |= X86_EFLAGS_AF;
3374 return X86EMUL_CONTINUE;
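/*
 * Worked BCD example (added note): 0x23 - 0x08 leaves AL = 0x1b with AF set;
 * DAS sees (al & 0x0f) > 9, subtracts 6, and yields AL = 0x15 - the correct
 * packed-BCD result of 23 - 8.
 */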
3377 static int em_aam(struct x86_emulate_ctxt *ctxt)
3379 u8 al, ah;
3381 if (ctxt->src.val == 0)
3382 return emulate_de(ctxt);
3384 al = ctxt->dst.val & 0xff;
3385 ah = al / ctxt->src.val;
3386 al %= ctxt->src.val;
3388 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3390 /* Set PF, ZF, SF */
3391 ctxt->src.type = OP_IMM;
3392 ctxt->src.val = 0;
3393 ctxt->src.bytes = 1;
3394 fastop(ctxt, em_or);
3396 return X86EMUL_CONTINUE;
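/*
 * Worked example (added note): AAM with the default base 10 on AL = 0x35
 * (53) stores AH = 5 and AL = 3, splitting binary AL into unpacked BCD
 * digits.
 */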
3399 static int em_aad(struct x86_emulate_ctxt *ctxt)
3401 u8 al = ctxt->dst.val & 0xff;
3402 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3404 al = (al + (ah * ctxt->src.val)) & 0xff;
3406 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3408 /* Set PF, ZF, SF */
3409 ctxt->src.type = OP_IMM;
3410 ctxt->src.val = 0;
3411 ctxt->src.bytes = 1;
3412 fastop(ctxt, em_or);
3414 return X86EMUL_CONTINUE;
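/*
 * Worked example (added note): AAD is the inverse of AAM - with AH = 5,
 * AL = 3 and base 10 it computes AL = 5 * 10 + 3 = 0x35 and clears AH.
 */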
3417 static int em_call(struct x86_emulate_ctxt *ctxt)
3419 int rc;
3420 long rel = ctxt->src.val;
3422 ctxt->src.val = (unsigned long)ctxt->_eip;
3423 rc = jmp_rel(ctxt, rel);
3424 if (rc != X86EMUL_CONTINUE)
3425 return rc;
3426 return em_push(ctxt);
3429 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3431 u16 sel, old_cs;
3432 ulong old_eip;
3433 int rc;
3434 struct desc_struct old_desc, new_desc;
3435 const struct x86_emulate_ops *ops = ctxt->ops;
3436 int cpl = ctxt->ops->cpl(ctxt);
3437 enum x86emul_mode prev_mode = ctxt->mode;
3439 old_eip = ctxt->_eip;
3440 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3442 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3443 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3444 X86_TRANSFER_CALL_JMP, &new_desc);
3445 if (rc != X86EMUL_CONTINUE)
3446 return rc;
3448 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3449 if (rc != X86EMUL_CONTINUE)
3450 goto fail;
3452 ctxt->src.val = old_cs;
3453 rc = em_push(ctxt);
3454 if (rc != X86EMUL_CONTINUE)
3455 goto fail;
3457 ctxt->src.val = old_eip;
3458 rc = em_push(ctxt);
3459 /* If we failed, we tainted the memory, but the very least we should
3460 restore is the old CS. */
3461 if (rc != X86EMUL_CONTINUE) {
3462 pr_warn_once("faulting far call emulation tainted memory\n");
3463 goto fail;
3464 }
3465 return rc;
3466 fail:
3467 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3468 ctxt->mode = prev_mode;
3473 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3475 int rc;
3476 unsigned long eip;
3478 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3479 if (rc != X86EMUL_CONTINUE)
3480 return rc;
3481 rc = assign_eip_near(ctxt, eip);
3482 if (rc != X86EMUL_CONTINUE)
3483 return rc;
3484 rsp_increment(ctxt, ctxt->src.val);
3485 return X86EMUL_CONTINUE;
3488 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3490 /* Write back the register source. */
3491 ctxt->src.val = ctxt->dst.val;
3492 write_register_operand(&ctxt->src);
3494 /* Write back the memory destination with implicit LOCK prefix. */
3495 ctxt->dst.val = ctxt->src.orig_val;
3496 ctxt->lock_prefix = 1;
3497 return X86EMUL_CONTINUE;
3500 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3502 ctxt->dst.val = ctxt->src2.val;
3503 return fastop(ctxt, em_imul);
3506 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3508 ctxt->dst.type = OP_REG;
3509 ctxt->dst.bytes = ctxt->src.bytes;
3510 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3511 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3513 return X86EMUL_CONTINUE;
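/*
 * Added note: the expression above is a branch-free sign extension.
 * (src >> (bits - 1)) isolates the sign bit; subtracting 1 turns that into
 * all-ones for a non-negative source and 0 for a negative one, and the
 * final ~ flips it, so RDX becomes all zeroes or all ones - exactly what
 * CWD/CDQ/CQO store.
 */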
3516 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3518 u64 tsc = 0;
3520 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3521 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3522 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3523 return X86EMUL_CONTINUE;
3526 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3528 u64 pmc;
3530 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3531 return emulate_gp(ctxt, 0);
3532 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3533 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3534 return X86EMUL_CONTINUE;
3537 static int em_mov(struct x86_emulate_ctxt *ctxt)
3539 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3540 return X86EMUL_CONTINUE;
3543 #define FFL(x) bit(X86_FEATURE_##x)
3545 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3547 u32 ebx, ecx, edx, eax = 1;
3548 u16 tmp;
3551 * Check MOVBE is set in the guest-visible CPUID leaf.
3553 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3554 if (!(ecx & FFL(MOVBE)))
3555 return emulate_ud(ctxt);
3557 switch (ctxt->op_bytes) {
3558 case 2:
3560 * From MOVBE definition: "...When the operand size is 16 bits,
3561 * the upper word of the destination register remains unchanged
3562 * ..."
3564 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3565 * rules, so we have to do the operation almost by hand.
3567 tmp = (u16)ctxt->src.val;
3568 ctxt->dst.val &= ~0xffffUL;
3569 ctxt->dst.val |= (unsigned long)swab16(tmp);
3570 break;
3571 case 4:
3572 ctxt->dst.val = swab32((u32)ctxt->src.val);
3573 break;
3574 case 8:
3575 ctxt->dst.val = swab64(ctxt->src.val);
3576 break;
3577 default:
3578 BUG();
3579 }
3580 return X86EMUL_CONTINUE;
3583 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3585 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3586 return emulate_gp(ctxt, 0);
3588 /* Disable writeback. */
3589 ctxt->dst.type = OP_NONE;
3590 return X86EMUL_CONTINUE;
3593 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3595 unsigned long val;
3597 if (ctxt->mode == X86EMUL_MODE_PROT64)
3598 val = ctxt->src.val & ~0ULL;
3599 else
3600 val = ctxt->src.val & ~0U;
3602 /* #UD condition is already handled. */
3603 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3604 return emulate_gp(ctxt, 0);
3606 /* Disable writeback. */
3607 ctxt->dst.type = OP_NONE;
3608 return X86EMUL_CONTINUE;
3611 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3613 u64 msr_data;
3615 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3616 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3617 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3618 return emulate_gp(ctxt, 0);
3620 return X86EMUL_CONTINUE;
3623 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3625 u64 msr_data;
3627 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3628 return emulate_gp(ctxt, 0);
3630 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3631 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3632 return X86EMUL_CONTINUE;
3635 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3637 if (ctxt->modrm_reg > VCPU_SREG_GS)
3638 return emulate_ud(ctxt);
3640 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3641 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3642 ctxt->dst.bytes = 2;
3643 return X86EMUL_CONTINUE;
3646 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3648 u16 sel = ctxt->src.val;
3650 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3651 return emulate_ud(ctxt);
3653 if (ctxt->modrm_reg == VCPU_SREG_SS)
3654 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3656 /* Disable writeback. */
3657 ctxt->dst.type = OP_NONE;
3658 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3661 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3663 u16 sel = ctxt->src.val;
3665 /* Disable writeback. */
3666 ctxt->dst.type = OP_NONE;
3667 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3670 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3672 u16 sel = ctxt->src.val;
3674 /* Disable writeback. */
3675 ctxt->dst.type = OP_NONE;
3676 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3679 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3681 int rc;
3682 ulong linear;
3684 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3685 if (rc == X86EMUL_CONTINUE)
3686 ctxt->ops->invlpg(ctxt, linear);
3687 /* Disable writeback. */
3688 ctxt->dst.type = OP_NONE;
3689 return X86EMUL_CONTINUE;
3692 static int em_clts(struct x86_emulate_ctxt *ctxt)
3694 ulong cr0;
3696 cr0 = ctxt->ops->get_cr(ctxt, 0);
3697 cr0 &= ~X86_CR0_TS;
3698 ctxt->ops->set_cr(ctxt, 0, cr0);
3699 return X86EMUL_CONTINUE;
3702 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3704 int rc = ctxt->ops->fix_hypercall(ctxt);
3706 if (rc != X86EMUL_CONTINUE)
3707 return rc;
3709 /* Let the processor re-execute the fixed hypercall */
3710 ctxt->_eip = ctxt->eip;
3711 /* Disable writeback. */
3712 ctxt->dst.type = OP_NONE;
3713 return X86EMUL_CONTINUE;
3716 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3717 void (*get)(struct x86_emulate_ctxt *ctxt,
3718 struct desc_ptr *ptr))
3720 struct desc_ptr desc_ptr;
3722 if (ctxt->mode == X86EMUL_MODE_PROT64)
3723 ctxt->op_bytes = 8;
3724 get(ctxt, &desc_ptr);
3725 if (ctxt->op_bytes == 2) {
3726 ctxt->op_bytes = 4;
3727 desc_ptr.address &= 0x00ffffff;
3729 /* Disable writeback. */
3730 ctxt->dst.type = OP_NONE;
3731 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3732 &desc_ptr, 2 + ctxt->op_bytes);
3735 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3737 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3740 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3742 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3745 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3747 struct desc_ptr desc_ptr;
3748 int rc;
3750 if (ctxt->mode == X86EMUL_MODE_PROT64)
3751 ctxt->op_bytes = 8;
3752 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3753 &desc_ptr.size, &desc_ptr.address,
3754 ctxt->op_bytes);
3755 if (rc != X86EMUL_CONTINUE)
3756 return rc;
3757 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3758 is_noncanonical_address(desc_ptr.address))
3759 return emulate_gp(ctxt, 0);
3760 if (lgdt)
3761 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3762 else
3763 ctxt->ops->set_idt(ctxt, &desc_ptr);
3764 /* Disable writeback. */
3765 ctxt->dst.type = OP_NONE;
3766 return X86EMUL_CONTINUE;
3769 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3771 return em_lgdt_lidt(ctxt, true);
3774 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3776 return em_lgdt_lidt(ctxt, false);
3779 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3781 if (ctxt->dst.type == OP_MEM)
3782 ctxt->dst.bytes = 2;
3783 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3784 return X86EMUL_CONTINUE;
3787 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3789 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3790 | (ctxt->src.val & 0x0f));
3791 ctxt->dst.type = OP_NONE;
3792 return X86EMUL_CONTINUE;
3795 static int em_loop(struct x86_emulate_ctxt *ctxt)
3797 int rc = X86EMUL_CONTINUE;
3799 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3800 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3801 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3802 rc = jmp_rel(ctxt, ctxt->src.val);
3807 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3809 int rc = X86EMUL_CONTINUE;
3811 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3812 rc = jmp_rel(ctxt, ctxt->src.val);
3817 static int em_in(struct x86_emulate_ctxt *ctxt)
3819 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3820 &ctxt->dst.val))
3821 return X86EMUL_IO_NEEDED;
3823 return X86EMUL_CONTINUE;
3826 static int em_out(struct x86_emulate_ctxt *ctxt)
3828 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3829 &ctxt->src.val, 1);
3830 /* Disable writeback. */
3831 ctxt->dst.type = OP_NONE;
3832 return X86EMUL_CONTINUE;
3835 static int em_cli(struct x86_emulate_ctxt *ctxt)
3837 if (emulator_bad_iopl(ctxt))
3838 return emulate_gp(ctxt, 0);
3840 ctxt->eflags &= ~X86_EFLAGS_IF;
3841 return X86EMUL_CONTINUE;
3844 static int em_sti(struct x86_emulate_ctxt *ctxt)
3846 if (emulator_bad_iopl(ctxt))
3847 return emulate_gp(ctxt, 0);
3849 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3850 ctxt->eflags |= X86_EFLAGS_IF;
3851 return X86EMUL_CONTINUE;
3854 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3856 u32 eax, ebx, ecx, edx;
3857 u64 msr = 0;
3859 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3860 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3861 ctxt->ops->cpl(ctxt)) {
3862 return emulate_gp(ctxt, 0);
3865 eax = reg_read(ctxt, VCPU_REGS_RAX);
3866 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3867 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3868 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3869 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3870 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3871 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3872 return X86EMUL_CONTINUE;
3875 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3877 u32 flags;
3879 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3880 X86_EFLAGS_SF;
3881 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3883 ctxt->eflags &= ~0xffUL;
3884 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3885 return X86EMUL_CONTINUE;
3888 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3890 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3891 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3892 return X86EMUL_CONTINUE;
3895 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3897 switch (ctxt->op_bytes) {
3898 #ifdef CONFIG_X86_64
3899 case 8:
3900 asm("bswap %0" : "+r"(ctxt->dst.val));
3901 break;
3902 #endif
3903 default:
3904 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3905 break;
3907 return X86EMUL_CONTINUE;
3910 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3912 /* emulating clflush regardless of cpuid */
3913 return X86EMUL_CONTINUE;
3916 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3918 ctxt->dst.val = (s32) ctxt->src.val;
3919 return X86EMUL_CONTINUE;
3922 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3924 u32 eax = 1, ebx, ecx = 0, edx;
3926 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3927 if (!(edx & FFL(FXSR)))
3928 return emulate_ud(ctxt);
3930 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3931 return emulate_nm(ctxt);
3934 * Don't emulate a case that should never be hit, instead of working
3935 * around a lack of fxsave64/fxrstor64 on old compilers.
3937 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3938 return X86EMUL_UNHANDLEABLE;
3940 return X86EMUL_CONTINUE;
3944 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3945 * 1) 16-bit mode
3946 * 2) 32-bit mode
3947 * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
3948 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3949 * FIP and FDP.
3950 * 3) 64-bit mode with REX.W prefix
3951 * - like (2), but XMM 8-15 are being saved and restored
3952 * 4) 64-bit mode without REX.W prefix
3953 * - like (3), but FIP and FDP are 64 bit
3955 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3956 * desired result. (4) is not emulated.
3958 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3959 * and FPU DS) should match.
3961 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3963 struct fxregs_state fx_state;
3964 size_t size;
3965 int rc;
3967 rc = check_fxsr(ctxt);
3968 if (rc != X86EMUL_CONTINUE)
3969 return rc;
3971 ctxt->ops->get_fpu(ctxt);
3973 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3975 ctxt->ops->put_fpu(ctxt);
3977 if (rc != X86EMUL_CONTINUE)
3978 return rc;
3980 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
3981 size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
3982 else
3983 size = offsetof(struct fxregs_state, xmm_space[0]);
3985 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3988 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
3989 struct fxregs_state *new)
3991 int rc = X86EMUL_CONTINUE;
3992 struct fxregs_state old;
3994 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
3995 if (rc != X86EMUL_CONTINUE)
3996 return rc;
3999 * 64 bit host will restore XMM 8-15, which is not correct on non-64
4000 * bit guests. Load the current values in order to preserve 64 bit
4001 * XMMs after fxrstor.
4003 #ifdef CONFIG_X86_64
4004 /* XXX: accessing XMM 8-15 very awkwardly */
4005 memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
4006 #endif
4009 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
4010 * does save and restore MXCSR.
4012 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
4013 memcpy(new->xmm_space, old.xmm_space, 8 * 16);
4015 return rc;
4018 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4020 struct fxregs_state fx_state;
4021 int rc;
4023 rc = check_fxsr(ctxt);
4024 if (rc != X86EMUL_CONTINUE)
4025 return rc;
4027 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
4028 if (rc != X86EMUL_CONTINUE)
4029 return rc;
4031 if (fx_state.mxcsr >> 16)
4032 return emulate_gp(ctxt, 0);
4034 ctxt->ops->get_fpu(ctxt);
4036 if (ctxt->mode < X86EMUL_MODE_PROT64)
4037 rc = fxrstor_fixup(ctxt, &fx_state);
4039 if (rc == X86EMUL_CONTINUE)
4040 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4042 ctxt->ops->put_fpu(ctxt);
4044 return rc;
4047 static bool valid_cr(int nr)
4049 switch (nr) {
4050 case 0:
4051 case 2 ... 4:
4052 case 8:
4053 return true;
4054 default:
4055 return false;
4056 }
4059 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4061 if (!valid_cr(ctxt->modrm_reg))
4062 return emulate_ud(ctxt);
4064 return X86EMUL_CONTINUE;
4067 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4069 u64 new_val = ctxt->src.val64;
4070 int cr = ctxt->modrm_reg;
4071 u64 efer = 0;
4073 static u64 cr_reserved_bits[] = {
4074 0xffffffff00000000ULL,
4075 0, 0, 0, /* CR3 checked later */
4076 CR4_RESERVED_BITS,
4077 0, 0, 0,
4078 CR8_RESERVED_BITS,
4079 };
4081 if (!valid_cr(cr))
4082 return emulate_ud(ctxt);
4084 if (new_val & cr_reserved_bits[cr])
4085 return emulate_gp(ctxt, 0);
4087 switch (cr) {
4088 case 0: {
4089 u64 cr4;
4090 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4091 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4092 return emulate_gp(ctxt, 0);
4094 cr4 = ctxt->ops->get_cr(ctxt, 4);
4095 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4097 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4098 !(cr4 & X86_CR4_PAE))
4099 return emulate_gp(ctxt, 0);
4101 break;
4102 }
4103 case 3: {
4104 u64 rsvd = 0;
4106 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4107 if (efer & EFER_LMA)
4108 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
4110 if (new_val & rsvd)
4111 return emulate_gp(ctxt, 0);
4113 break;
4114 }
4115 case 4: {
4116 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4118 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4119 return emulate_gp(ctxt, 0);
4121 break;
4122 }
4123 }
4125 return X86EMUL_CONTINUE;
4128 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4130 ulong dr7;
4132 ctxt->ops->get_dr(ctxt, 7, &dr7);
4134 /* Check if DR7.Global_Enable is set */
4135 return dr7 & (1 << 13);
4138 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4140 int dr = ctxt->modrm_reg;
4141 u64 cr4;
4143 if (dr > 7)
4144 return emulate_ud(ctxt);
4146 cr4 = ctxt->ops->get_cr(ctxt, 4);
4147 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4148 return emulate_ud(ctxt);
4150 if (check_dr7_gd(ctxt)) {
4151 ulong dr6;
4153 ctxt->ops->get_dr(ctxt, 6, &dr6);
4154 dr6 &= ~15;
4155 dr6 |= DR6_BD | DR6_RTM;
4156 ctxt->ops->set_dr(ctxt, 6, dr6);
4157 return emulate_db(ctxt);
4160 return X86EMUL_CONTINUE;
4163 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4165 u64 new_val = ctxt->src.val64;
4166 int dr = ctxt->modrm_reg;
4168 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4169 return emulate_gp(ctxt, 0);
4171 return check_dr_read(ctxt);
4174 static int check_svme(struct x86_emulate_ctxt *ctxt)
4178 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4180 if (!(efer & EFER_SVME))
4181 return emulate_ud(ctxt);
4183 return X86EMUL_CONTINUE;
4186 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4188 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4190 /* Valid physical address? */
4191 if (rax & 0xffff000000000000ULL)
4192 return emulate_gp(ctxt, 0);
4194 return check_svme(ctxt);
4197 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4199 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4201 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4202 return emulate_ud(ctxt);
4204 return X86EMUL_CONTINUE;
4207 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4209 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4210 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4212 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4213 ctxt->ops->check_pmc(ctxt, rcx))
4214 return emulate_gp(ctxt, 0);
4216 return X86EMUL_CONTINUE;
4219 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4221 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4222 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4223 return emulate_gp(ctxt, 0);
4225 return X86EMUL_CONTINUE;
4228 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4230 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4231 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4232 return emulate_gp(ctxt, 0);
4234 return X86EMUL_CONTINUE;
4237 #define D(_y) { .flags = (_y) }
4238 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4239 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4240 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4241 #define N D(NotImpl)
4242 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4243 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4244 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4245 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4246 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4247 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4248 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4249 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4250 #define II(_f, _e, _i) \
4251 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4252 #define IIP(_f, _e, _i, _p) \
4253 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4254 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4255 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4257 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4258 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4259 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4260 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4261 #define I2bvIP(_f, _e, _i, _p) \
4262 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4264 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4265 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4266 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
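/*
 * Illustrative expansion (added note): F6ALU() emits the six classic ALU
 * encodings in opcode order. F6ALU(Lock, em_add), for instance, covers
 * 0x00 add r/m8,r8 / 0x01 add r/m,r (lockable), 0x02 add r8,r/m8 /
 * 0x03 add r,r/m, and 0x04 add al,imm8 / 0x05 add ax/eax/rax,imm, with
 * Lock kept only for the memory-destination pair.
 */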
4268 static const struct opcode group7_rm0[] = {
4270 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4274 static const struct opcode group7_rm1[] = {
4275 DI(SrcNone | Priv, monitor),
4276 DI(SrcNone | Priv, mwait),
4280 static const struct opcode group7_rm3[] = {
4281 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4282 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4283 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4284 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4285 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4286 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4287 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4288 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4291 static const struct opcode group7_rm7[] = {
4293 DIP(SrcNone, rdtscp, check_rdtsc),
4297 static const struct opcode group1[] = {
4299 F(Lock | PageTable, em_or),
4302 F(Lock | PageTable, em_and),
4308 static const struct opcode group1A[] = {
4309 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4312 static const struct opcode group2[] = {
4313 F(DstMem | ModRM, em_rol),
4314 F(DstMem | ModRM, em_ror),
4315 F(DstMem | ModRM, em_rcl),
4316 F(DstMem | ModRM, em_rcr),
4317 F(DstMem | ModRM, em_shl),
4318 F(DstMem | ModRM, em_shr),
4319 F(DstMem | ModRM, em_shl),
4320 F(DstMem | ModRM, em_sar),
4323 static const struct opcode group3[] = {
4324 F(DstMem | SrcImm | NoWrite, em_test),
4325 F(DstMem | SrcImm | NoWrite, em_test),
4326 F(DstMem | SrcNone | Lock, em_not),
4327 F(DstMem | SrcNone | Lock, em_neg),
4328 F(DstXacc | Src2Mem, em_mul_ex),
4329 F(DstXacc | Src2Mem, em_imul_ex),
4330 F(DstXacc | Src2Mem, em_div_ex),
4331 F(DstXacc | Src2Mem, em_idiv_ex),
4334 static const struct opcode group4[] = {
4335 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4336 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4340 static const struct opcode group5[] = {
4341 F(DstMem | SrcNone | Lock, em_inc),
4342 F(DstMem | SrcNone | Lock, em_dec),
4343 I(SrcMem | NearBranch, em_call_near_abs),
4344 I(SrcMemFAddr | ImplicitOps, em_call_far),
4345 I(SrcMem | NearBranch, em_jmp_abs),
4346 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4347 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4350 static const struct opcode group6[] = {
4351 DI(Prot | DstMem, sldt),
4352 DI(Prot | DstMem, str),
4353 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4354 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4358 static const struct group_dual group7 = { {
4359 II(Mov | DstMem, em_sgdt, sgdt),
4360 II(Mov | DstMem, em_sidt, sidt),
4361 II(SrcMem | Priv, em_lgdt, lgdt),
4362 II(SrcMem | Priv, em_lidt, lidt),
4363 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4364 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4365 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4369 N, EXT(0, group7_rm3),
4370 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4371 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4375 static const struct opcode group8[] = {
4377 F(DstMem | SrcImmByte | NoWrite, em_bt),
4378 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4379 F(DstMem | SrcImmByte | Lock, em_btr),
4380 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4383 static const struct group_dual group9 = { {
4384 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4386 N, N, N, N, N, N, N, N,
4389 static const struct opcode group11[] = {
4390 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4394 static const struct gprefix pfx_0f_ae_7 = {
4395 I(SrcMem | ByteOp, em_clflush), N, N, N,
4398 static const struct group_dual group15 = { {
4399 I(ModRM | Aligned16, em_fxsave),
4400 I(ModRM | Aligned16, em_fxrstor),
4401 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4403 N, N, N, N, N, N, N, N,
4406 static const struct gprefix pfx_0f_6f_0f_7f = {
4407 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4410 static const struct instr_dual instr_dual_0f_2b = {
4414 static const struct gprefix pfx_0f_2b = {
4415 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4418 static const struct gprefix pfx_0f_28_0f_29 = {
4419 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4422 static const struct gprefix pfx_0f_e7 = {
4423 N, I(Sse, em_mov), N, N,
4426 static const struct escape escape_d9 = { {
4427 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4430 N, N, N, N, N, N, N, N,
4432 N, N, N, N, N, N, N, N,
4434 N, N, N, N, N, N, N, N,
4436 N, N, N, N, N, N, N, N,
4438 N, N, N, N, N, N, N, N,
4440 N, N, N, N, N, N, N, N,
4442 N, N, N, N, N, N, N, N,
4444 N, N, N, N, N, N, N, N,
4447 static const struct escape escape_db = { {
4448 N, N, N, N, N, N, N, N,
4451 N, N, N, N, N, N, N, N,
4453 N, N, N, N, N, N, N, N,
4455 N, N, N, N, N, N, N, N,
4457 N, N, N, N, N, N, N, N,
4459 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4461 N, N, N, N, N, N, N, N,
4463 N, N, N, N, N, N, N, N,
4465 N, N, N, N, N, N, N, N,
4468 static const struct escape escape_dd = { {
4469 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4472 N, N, N, N, N, N, N, N,
4474 N, N, N, N, N, N, N, N,
4476 N, N, N, N, N, N, N, N,
4478 N, N, N, N, N, N, N, N,
4480 N, N, N, N, N, N, N, N,
4482 N, N, N, N, N, N, N, N,
4484 N, N, N, N, N, N, N, N,
4486 N, N, N, N, N, N, N, N,
4489 static const struct instr_dual instr_dual_0f_c3 = {
4490 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4493 static const struct mode_dual mode_dual_63 = {
4494 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4497 static const struct opcode opcode_table[256] = {
4499 F6ALU(Lock, em_add),
4500 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4501 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4503 F6ALU(Lock | PageTable, em_or),
4504 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4507 F6ALU(Lock, em_adc),
4508 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4509 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4511 F6ALU(Lock, em_sbb),
4512 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4513 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4515 F6ALU(Lock | PageTable, em_and), N, N,
4517 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4519 F6ALU(Lock, em_xor), N, N,
4521 F6ALU(NoWrite, em_cmp), N, N,
4523 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4525 X8(I(SrcReg | Stack, em_push)),
4527 X8(I(DstReg | Stack, em_pop)),
4529 I(ImplicitOps | Stack | No64, em_pusha),
4530 I(ImplicitOps | Stack | No64, em_popa),
4531 N, MD(ModRM, &mode_dual_63),
4534 I(SrcImm | Mov | Stack, em_push),
4535 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4536 I(SrcImmByte | Mov | Stack, em_push),
4537 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4538 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4539 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4541 X16(D(SrcImmByte | NearBranch)),
4543 G(ByteOp | DstMem | SrcImm, group1),
4544 G(DstMem | SrcImm, group1),
4545 G(ByteOp | DstMem | SrcImm | No64, group1),
4546 G(DstMem | SrcImmByte, group1),
4547 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4548 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4550 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4551 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4552 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4553 D(ModRM | SrcMem | NoAccess | DstReg),
4554 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4557 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4559 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4560 I(SrcImmFAddr | No64, em_call_far), N,
4561 II(ImplicitOps | Stack, em_pushf, pushf),
4562 II(ImplicitOps | Stack, em_popf, popf),
4563 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4565 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4566 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4567 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4568 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4570 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4571 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4572 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4573 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4575 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4577 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4579 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4580 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4581 I(ImplicitOps | NearBranch, em_ret),
4582 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4583 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4584 G(ByteOp, group11), G(0, group11),
4586 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4587 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4588 I(ImplicitOps, em_ret_far),
4589 D(ImplicitOps), DI(SrcImmByte, intn),
4590 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4592 G(Src2One | ByteOp, group2), G(Src2One, group2),
4593 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4594 I(DstAcc | SrcImmUByte | No64, em_aam),
4595 I(DstAcc | SrcImmUByte | No64, em_aad),
4596 F(DstAcc | ByteOp | No64, em_salc),
4597 I(DstAcc | SrcXLat | ByteOp, em_mov),
4599 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4601 X3(I(SrcImmByte | NearBranch, em_loop)),
4602 I(SrcImmByte | NearBranch, em_jcxz),
4603 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4604 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4606 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4607 I(SrcImmFAddr | No64, em_jmp_far),
4608 D(SrcImmByte | ImplicitOps | NearBranch),
4609 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4610 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4612 N, DI(ImplicitOps, icebp), N, N,
4613 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4614 G(ByteOp, group3), G(0, group3),
4616 D(ImplicitOps), D(ImplicitOps),
4617 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4618 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4621 static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
 * Insns below are selected by the prefix, which is indexed by the third
 * opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};
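
/*
 * Example walk for MOVBE reg, mem (0f 38 f0 /r): x86_decode_insn() takes
 * twobyte_table[0x38] into this map, opcode_map_0f_38[0xf0] selects
 * three_byte_0f_38_f0, whose no-SIMD-prefix slot is an instr_dual that
 * only accepts a memory operand (mod != 3) and resolves to em_movbe.
 */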
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;	/* 64-bit ops carry at most a 4-byte immediate */
	return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
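
/*
 * decode_imm() serves both signed and unsigned immediates: OpImmByte is
 * decode_imm(ctxt, op, 1, true) (a sign-extended rel8/imm8), while
 * OpImmUByte is decode_imm(ctxt, op, 1, false) (zero-extended, as used
 * for I/O port numbers).
 */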
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
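
/*
 * All OpMem* variants funnel through mem_common above: decode_modrm() has
 * already filled ctxt->memop, so the case bodies only adjust the access
 * size (e.g. 16 bytes for CMPXCHG16B via OpMem64 when op_bytes == 8)
 * before the operand is copied and remembered in ctxt->memopp for the
 * later rip-relative fixup.
 */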
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;
	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
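
	/*
	 * Example: for the 64-bit encoding 48 89 08 (mov %rcx, (%rax)),
	 * the loop above consumes 0x48 as a REX prefix, done_prefixes sees
	 * REX.W set and widens op_bytes to 8, and 0x89 is then looked up
	 * in opcode_table[] below.
	 */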
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
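
	/*
	 * Example: f7 /3 (neg r/m) carries Group flags, so the loop above
	 * indexes group3 with bits 5:3 of the mod r/m byte; the selected
	 * entry's flags and execute hook replace the table entry's before
	 * decode continues.
	 */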
	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}
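
	/*
	 * Note the widening above for long mode: there is no 32-bit stack
	 * access or near branch in 64-bit mode, so a default 4-byte operand
	 * size on a Stack or NearBranch instruction is promoted to 8 bytes
	 * (e.g. a plain "push %rbx" or "jmp rel32").
	 */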
	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
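
/*
 * A rough sketch of the expected calling sequence (the real caller,
 * x86_emulate_instruction() in x86.c, adds fault injection, retry and
 * restart handling around it):
 *
 *	rc = x86_decode_insn(ctxt, insn, insn_len);
 *	if (rc == EMULATION_OK)
 *		rc = x86_emulate_insn(ctxt);
 *
 * where EMULATION_RESTART asks the caller to resume a partially executed
 * string instruction without a fresh decode.
 */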
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and REPNE:
	 * if the repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
	 * corresponding termination condition:
	 *     - if REPE/REPZ and ZF = 0 then done
	 *     - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
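
/*
 * Only cmps (0xa6/0xa7) and scas (0xae/0xaf) update ZF, so only they get
 * the second check: "repe cmpsb", for example, stops either when RCX
 * reaches zero (tested by the caller) or at the first mismatching byte.
 */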
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	register void *__sp asm(_ASM_SP);
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), "+r"(__sp)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
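
/*
 * fastop blocks are tiny asm stubs laid out FASTOP_SIZE bytes apart, one
 * per operand width, so for a non-byte op the __ffs() line above selects
 * the 2/4/8-byte variant by offsetting the entry point (__ffs of 2/4/8 is
 * 1/2/3).  The stub runs with the guest's arithmetic flags loaded and
 * exchanges values through the registers the asm statement pins: dst in
 * RAX, src in RDX, src2 in RCX.
 */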
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	/* Clear fields that are set conditionally but read without a guard. */
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}
	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
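	/*
	 * The PFERR_WRITE_MASK fix-up above exists because a read-modify-
	 * write destination is read first: if that read faults, the guest
	 * should still see a write fault, since the instruction
	 * architecturally writes.
	 */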
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
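
	/*
	 * Opcodes without an ->execute hook fall through to the switch
	 * below, which handles the remaining one-byte opcodes in-line;
	 * two- and three-byte stragglers use the labelled blocks further
	 * down.
	 */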
	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}
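
	/*
	 * Example for 0x98 above: with op_bytes == 4 (cwde), the (s16)
	 * cast sign-extends %ax into %eax, so 0x8000 becomes 0xffff8000
	 * at writeback.
	 */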
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			/* a 32-bit cmov still clears the upper dword */
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}
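
/*
 * Rationale: a rep-string or two-memory-operand instruction touches more
 * than one guest address per execution, so the single gpa cached from the
 * faulting access cannot be assumed to match the operand being emulated
 * and the caller must translate each access itself.
 */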