#define MemAbs (1<<11) /* Memory operand is absolute displacement */
#define String (1<<12) /* String instruction (rep capable) */
#define Stack (1<<13) /* Stack instruction (push/pop) */
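+/*
+ * The group mechanisms below are enumerated values sharing one 3-bit
+ * field (bits 14:16) selected via GroupMask, not independent flag bits.
+ */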
+#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */
#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
-#define Prefix (1<<16) /* Instruction varies with 66/f2/f3 prefix */
+#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */
+#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse (1<<17) /* SSE Vector instruction */
-#define RMExt (1<<18) /* Opcode extension in ModRM r/m if mod == 3 */
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
return rc;
}
+static int em_pop(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ return emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+}
+
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
void *dest, int len)
return rc;
}
+static int em_popf(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
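+ /* Route the popped value into EFLAGS through the normal writeback path. */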
+ c->dst.type = OP_REG;
+ c->dst.addr.reg = &ctxt->eflags;
+ c->dst.bytes = c->op_bytes;
+ return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+}
+
static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, int seg)
{
return rc;
}
-static int emulate_pusha(struct x86_emulate_ctxt *ctxt)
+static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
unsigned long old_esp = c->regs[VCPU_REGS_RSP];
return rc;
}
-static int emulate_popa(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static int em_pushf(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
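+ /* em_push takes its operand from src.val; push a copy of EFLAGS. */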
+ c->src.val = (unsigned long)ctxt->eflags;
+ return em_push(ctxt);
+}
+
+static int em_popa(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
--reg;
}
- rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
+ rc = emulate_pop(ctxt, ctxt->ops, &c->regs[reg], c->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
--reg;
return X86EMUL_CONTINUE;
}
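+/*
+ * Thin ALU wrappers so these ops can be dispatched through
+ * opcode::execute instead of switch cases in x86_emulate_insn().
+ */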
+static int em_add(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_or(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_adc(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_sbb(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_and(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_sub(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_xor(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_cmp(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
static int em_imul(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
return X86EMUL_CONTINUE;
}
+static int em_vmcall(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int rc;
+
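+ /* VMCALL is 0f 01 c1: ModRM must have mod == 3, reg == 0, rm == 1. */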
+ if (c->modrm_mod != 3 || c->modrm_rm != 1)
+ return X86EMUL_UNHANDLEABLE;
+
+ rc = ctxt->ops->fix_hypercall(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ /* Let the processor re-execute the fixed hypercall */
+ c->eip = ctxt->eip;
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ struct desc_ptr desc_ptr;
+ int rc;
+
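+ /* Fetch the new GDT limit and base from the memory operand. */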
+ rc = read_descriptor(ctxt, ctxt->ops, c->src.addr.mem,
+ &desc_ptr.size, &desc_ptr.address,
+ c->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ ctxt->ops->set_gdt(ctxt, &desc_ptr);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int rc;
+
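+ /* fix_hypercall rewrites the guest's hypercall insn to the host-native one. */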
+ rc = ctxt->ops->fix_hypercall(ctxt);
+
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+ return rc;
+}
+
+static int em_lidt(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ struct desc_ptr desc_ptr;
+ int rc;
+
+ rc = read_descriptor(ctxt, ctxt->ops, c->src.addr.mem,
+ &desc_ptr.size,
+ &desc_ptr.address,
+ c->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ ctxt->ops->set_idt(ctxt, &desc_ptr);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_smsw(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+
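+ /* smsw stores only the low 16 bits of CR0 (the machine status word). */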
+ c->dst.bytes = 2;
+ c->dst.val = ctxt->ops->get_cr(ctxt, 0);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
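+
+ /* Only CR0[3:1] are replaced; lmsw can set CR0.PE but never clear it. */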
+ ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
+ | (c->src.val & 0x0f));
+ c->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];
/* Valid physical address? */
- if (rax & 0xffff000000000000)
+ if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
#define N D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
-#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
+#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
-#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
- D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
- D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
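+/* ALU ops come in six encodings: r/m,reg; reg,r/m; acc,imm (byte + word each) */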
+#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
+ I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
+ I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static struct opcode group7_rm1[] = {
DI(SrcNone | ModRM | Priv, monitor),
static struct opcode group7_rm3[] = {
DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
- DI(SrcNone | ModRM | Prot | VendorSpecific, vmmcall),
+ II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
+
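+/* Group 1 (opcodes 80-83): operation selected by the ModRM reg field */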
static struct opcode group1[] = {
- X7(D(Lock)), N
+ I(Lock, em_add),
+ I(Lock, em_or),
+ I(Lock, em_adc),
+ I(Lock, em_sbb),
+ I(Lock, em_and),
+ I(Lock, em_sub),
+ I(Lock, em_xor),
+ I(0, em_cmp),
};
static struct opcode group1A[] = {
static struct group_dual group7 = { {
DI(ModRM | Mov | DstMem | Priv, sgdt),
DI(ModRM | Mov | DstMem | Priv, sidt),
- DI(ModRM | SrcMem | Priv, lgdt), DI(ModRM | SrcMem | Priv, lidt),
- DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
- DI(SrcMem16 | ModRM | Mov | Priv, lmsw),
- DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
+ II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
+ II(ModRM | SrcMem | Priv, em_lidt, lidt),
+ II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
+ II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
- D(SrcNone | ModRM | Priv | VendorSpecific), EXT(0, group7_rm1),
+ I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
+ EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
- DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
- DI(SrcMem16 | ModRM | Mov | Priv, lmsw), EXT(0, group7_rm7),
+ II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };
static struct opcode group8[] = {
static struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
- D6ALU(Lock),
+ I6ALU(Lock, em_add),
D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
/* 0x08 - 0x0F */
- D6ALU(Lock),
+ I6ALU(Lock, em_or),
D(ImplicitOps | Stack | No64), N,
/* 0x10 - 0x17 */
- D6ALU(Lock),
+ I6ALU(Lock, em_adc),
D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
/* 0x18 - 0x1F */
- D6ALU(Lock),
+ I6ALU(Lock, em_sbb),
D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
/* 0x20 - 0x27 */
- D6ALU(Lock), N, N,
+ I6ALU(Lock, em_and), N, N,
/* 0x28 - 0x2F */
- D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
+ I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
- D6ALU(Lock), N, N,
+ I6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
- D6ALU(0), N, N,
+ I6ALU(0, em_cmp), N, N,
/* 0x40 - 0x4F */
X16(D(DstReg)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
- X8(D(DstReg | Stack)),
+ X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
- D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
+ I(ImplicitOps | Stack | No64, em_pusha),
+ I(ImplicitOps | Stack | No64, em_popa),
N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
N, N, N, N,
/* 0x68 - 0x6F */
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
- DI(ImplicitOps | Stack, pushf), DI(ImplicitOps | Stack, popf), N, N,
+ II(ImplicitOps | Stack, em_pushf, pushf),
+ II(ImplicitOps | Stack, em_popf, popf), N, N,
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
- D2bv(SrcSI | DstDI | String),
+ I2bv(SrcSI | DstDI | String, em_cmp),
/* 0xA8 - 0xAF */
D2bv(DstAcc | SrcImm),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
- D2bv(SrcAcc | DstDI | String),
+ I2bv(SrcAcc | DstDI | String, em_cmp),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
#undef D2bv
#undef D2bvIP
#undef I2bv
-#undef D6ALU
+#undef I6ALU
static unsigned imm_size(struct decode_cache *c)
{
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
- int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
+ int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
- struct opcode opcode, *g_mod012, *g_mod3;
+ struct opcode opcode;
struct operand memop = { .type = OP_NONE };
c->eip = ctxt->eip;
}
c->d = opcode.flags;
- if (c->d & Group) {
- dual = c->d & GroupDual;
- c->modrm = insn_fetch(u8, 1, c->eip);
- --c->eip;
-
- if (c->d & GroupDual) {
- g_mod012 = opcode.u.gdual->mod012;
- g_mod3 = opcode.u.gdual->mod3;
- } else
- g_mod012 = g_mod3 = opcode.u.group;
-
- c->d &= ~(Group | GroupDual);
-
- goffset = (c->modrm >> 3) & 7;
-
- if ((c->modrm >> 6) == 3)
- opcode = g_mod3[goffset];
- else
- opcode = g_mod012[goffset];
-
- if (opcode.flags & RMExt) {
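+ /*
+  * Resolve the group mechanisms iteratively: the entry picked out of
+  * one table may itself use another group mechanism.
+  */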
+ while (c->d & GroupMask) {
+ switch (c->d & GroupMask) {
+ case Group:
+ c->modrm = insn_fetch(u8, 1, c->eip);
+ --c->eip;
+ goffset = (c->modrm >> 3) & 7;
+ opcode = opcode.u.group[goffset];
+ break;
+ case GroupDual:
+ c->modrm = insn_fetch(u8, 1, c->eip);
+ --c->eip;
+ goffset = (c->modrm >> 3) & 7;
+ if ((c->modrm >> 6) == 3)
+ opcode = opcode.u.gdual->mod3[goffset];
+ else
+ opcode = opcode.u.gdual->mod012[goffset];
+ break;
+ case RMExt:
goffset = c->modrm & 7;
opcode = opcode.u.group[goffset];
- }
-
- c->d |= opcode.flags;
- }
-
- if (c->d & Prefix) {
- if (c->rep_prefix && op_prefix)
+ break;
+ case Prefix:
+ if (c->rep_prefix && op_prefix)
+ return X86EMUL_UNHANDLEABLE;
+ simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
+ switch (simd_prefix) {
+ case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
+ case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
+ case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
+ case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
+ }
+ break;
+ default:
return X86EMUL_UNHANDLEABLE;
- simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
- switch (simd_prefix) {
- case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
- case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
- case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
- case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
+
+ c->d &= ~GroupMask;
c->d |= opcode.flags;
}
int rc = X86EMUL_CONTINUE;
int saved_dst_type = c->dst.type;
int irq; /* Used for int 3, int, and into */
- struct desc_ptr desc_ptr;
ctxt->decode.mem_read.pos = 0;
goto twobyte_insn;
switch (c->b) {
- case 0x00 ... 0x05:
- add: /* add */
- emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
- break;
case 0x06: /* push es */
rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
break;
case 0x07: /* pop es */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
break;
- case 0x08 ... 0x0d:
- or: /* or */
- emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
- break;
case 0x0e: /* push cs */
rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
break;
- case 0x10 ... 0x15:
- adc: /* adc */
- emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
- break;
case 0x16: /* push ss */
rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
break;
case 0x17: /* pop ss */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
break;
- case 0x18 ... 0x1d:
- sbb: /* sbb */
- emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
- break;
case 0x1e: /* push ds */
rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
break;
case 0x1f: /* pop ds */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
break;
- case 0x20 ... 0x25:
- and: /* and */
- emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
- break;
- case 0x28 ... 0x2d:
- sub: /* sub */
- emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
- break;
- case 0x30 ... 0x35:
- xor: /* xor */
- emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
- break;
- case 0x38 ... 0x3d:
- cmp: /* cmp */
- c->dst.type = OP_NONE; /* Disable writeback. */
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
- break;
case 0x40 ... 0x47: /* inc r16/r32 */
emulate_1op("inc", c->dst, ctxt->eflags);
break;
case 0x48 ... 0x4f: /* dec r16/r32 */
emulate_1op("dec", c->dst, ctxt->eflags);
break;
- case 0x58 ... 0x5f: /* pop reg */
- pop_instruction:
- rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
- break;
- case 0x60: /* pusha */
- rc = emulate_pusha(ctxt);
- break;
- case 0x61: /* popa */
- rc = emulate_popa(ctxt, ops);
- break;
case 0x63: /* movsxd */
if (ctxt->mode != X86EMUL_MODE_PROT64)
goto cannot_emulate;
if (test_cc(c->b, ctxt->eflags))
jmp_rel(c, c->src.val);
break;
- case 0x80 ... 0x83: /* Grp1 */
- switch (c->modrm_reg) {
- case 0:
- goto add;
- case 1:
- goto or;
- case 2:
- goto adc;
- case 3:
- goto sbb;
- case 4:
- goto and;
- case 5:
- goto sub;
- case 6:
- goto xor;
- case 7:
- goto cmp;
- }
- break;
case 0x84 ... 0x85:
test:
emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
case 8: c->dst.val = (s32)c->dst.val; break;
}
break;
- case 0x9c: /* pushf */
- c->src.val = (unsigned long) ctxt->eflags;
- rc = em_push(ctxt);
- break;
- case 0x9d: /* popf */
- c->dst.type = OP_REG;
- c->dst.addr.reg = &ctxt->eflags;
- c->dst.bytes = c->op_bytes;
- rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
- break;
- case 0xa6 ... 0xa7: /* cmps */
- goto cmp;
case 0xa8 ... 0xa9: /* test ax, imm */
goto test;
- case 0xae ... 0xaf: /* scas */
- goto cmp;
case 0xc0 ... 0xc1:
emulate_grp2(ctxt);
break;
c->dst.type = OP_REG;
c->dst.addr.reg = &c->eip;
c->dst.bytes = c->op_bytes;
- goto pop_instruction;
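+ /* Pop the return address into eip via the normal dst writeback. */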
+ rc = em_pop(ctxt);
+ break;
case 0xc4: /* les */
rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
break;
jump_far:
memcpy(&sel, c->src.valptr + c->op_bytes, 2);
- if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
+ rc = load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS);
+ if (rc != X86EMUL_CONTINUE)
goto done;
c->eip = 0;
twobyte_insn:
switch (c->b) {
- case 0x01: /* lgdt, lidt, lmsw */
- switch (c->modrm_reg) {
- case 0: /* vmcall */
- if (c->modrm_mod != 3 || c->modrm_rm != 1)
- goto cannot_emulate;
-
- rc = ctxt->ops->fix_hypercall(ctxt);
- if (rc != X86EMUL_CONTINUE)
- goto done;
-
- /* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->eip;
- /* Disable writeback. */
- c->dst.type = OP_NONE;
- break;
- case 2: /* lgdt */
- rc = read_descriptor(ctxt, ops, c->src.addr.mem,
- &desc_ptr.size, &desc_ptr.address,
- c->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- ctxt->ops->set_gdt(ctxt, &desc_ptr);
- /* Disable writeback. */
- c->dst.type = OP_NONE;
- break;
- case 3: /* lidt/vmmcall */
- if (c->modrm_mod == 3) {
- switch (c->modrm_rm) {
- case 1:
- rc = ctxt->ops->fix_hypercall(ctxt);
- break;
- default:
- goto cannot_emulate;
- }
- } else {
- rc = read_descriptor(ctxt, ops, c->src.addr.mem,
- &desc_ptr.size,
- &desc_ptr.address,
- c->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- ctxt->ops->set_idt(ctxt, &desc_ptr);
- }
- /* Disable writeback. */
- c->dst.type = OP_NONE;
- break;
- case 4: /* smsw */
- c->dst.bytes = 2;
- c->dst.val = ops->get_cr(ctxt, 0);
- break;
- case 6: /* lmsw */
- ops->set_cr(ctxt, 0, (ops->get_cr(ctxt, 0) & ~0x0eul) |
- (c->src.val & 0x0f));
- c->dst.type = OP_NONE;
- break;
- case 5: /* not defined */
- emulate_ud(ctxt);
- rc = X86EMUL_PROPAGATE_FAULT;
- goto done;
- case 7: /* invlpg*/
- rc = em_invlpg(ctxt);
- break;
- default:
- goto cannot_emulate;
- }
- break;
case 0x05: /* syscall */
rc = emulate_syscall(ctxt, ops);
break;
rc = em_clts(ctxt);
break;
case 0x09: /* wbinvd */
- kvm_emulate_wbinvd(ctxt->vcpu);
+ (ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */