}
static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
{
- return base + address_mask(c, reg);
+ return address_mask(c, reg);
}
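
Context for this hunk: register_address() now returns only the masked effective address; the segment base moves into the new linear() helper below. A standalone sketch of the masking it relies on (modeled on emulate.c's ad_mask/address_mask, shown for illustration only):

	/* Effective addresses wrap at the address size (2, 4 or 8 bytes)
	 * before any segment base is added. */
	static unsigned long address_mask_sketch(int ad_bytes, unsigned long reg)
	{
		if (ad_bytes == sizeof(unsigned long))
			return reg;				/* native width: no wrap */
		return reg & ((1UL << (ad_bytes * 8)) - 1);	/* 16/32-bit wrap */
	}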
static inline void
return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct decode_cache *c)
{
if (!c->has_seg_override)
return 0;
- return seg_base(ctxt, ops, c->seg_override);
+ return c->seg_override;
}
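
seg_override() now yields a segment register index rather than a precomputed base. The bare "return 0" looks suspicious (0 is VCPU_SREG_ES in KVM's numbering), but the decode path installs a default override before any consumer runs, as a hunk further down in this patch shows:

	/* from x86_decode_insn: a DS default is set first, so
	 * has_seg_override holds whenever seg_override() is consulted */
	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);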
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr)
{
- return seg_base(ctxt, ops, VCPU_SREG_ES);
-}
+ struct decode_cache *c = &ctxt->decode;
+ ulong la;
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
-{
- return seg_base(ctxt, ops, VCPU_SREG_SS);
+ la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+ if (c->ad_bytes != 8)
+ la &= (u32)-1;
+ return la;
}
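
linear() is now the single place where a (segment, offset) pair becomes a flat address; outside 64-bit addressing (ad_bytes != 8) the sum is truncated to 32 bits, since only long mode has wider linear addresses. It relies on the segmented_address type this patch adds to kvm_emulate.h (not visible in this hunk); presumed from the header half of the patch, it is essentially:

	/* an effective address paired with the segment register
	 * it is relative to */
	struct segmented_address {
		ulong ea;	/* intra-segment (effective) address */
		unsigned seg;	/* segment register index, e.g. VCPU_SREG_SS */
	};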
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- ulong addr,
+ struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
if (op_bytes == 2)
op_bytes = 3;
*address = 0;
- rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+ rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+ ctxt->vcpu, NULL);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+ rc = ops->read_std(linear(ctxt, addr) + 2, address, op_bytes,
+ ctxt->vcpu, NULL);
return rc;
}
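
read_descriptor() takes the raw segmented operand and linearizes at each access; note the +2 for the base field is applied after linearization, on the flat address. A sketch of how an lgdt/lidt-style caller is expected to look after this change (shape assumed from the surrounding emulator code):

	u16 size;
	unsigned long address;

	/* the operand already carries (seg, ea); no base math at the call site */
	rc = read_descriptor(ctxt, ops, c->src.addr.mem,
			     &size, &address, c->op_bytes);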
break;
}
}
- op->addr.mem = modrm_ea;
+ op->addr.mem.ea = modrm_ea;
done:
return rc;
}
op->type = OP_MEM;
switch (c->ad_bytes) {
case 2:
- op->addr.mem = insn_fetch(u16, 2, c->eip);
+ op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
break;
case 4:
- op->addr.mem = insn_fetch(u32, 4, c->eip);
+ op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
break;
case 8:
- op->addr.mem = insn_fetch(u64, 8, c->eip);
+ op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
break;
}
done:
else if (c->src.bytes == 4)
sv = (s32)c->src.val & (s32)mask;
- c->dst.addr.mem += (sv >> 3);
+ c->dst.addr.mem.ea += (sv >> 3);
}
/* only subword offset */
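
Worked example for the bit-offset adjustment above: bt WORD [m], 35 with 16-bit operands. A standalone rendering of the arithmetic:

	int bit = 35, op_bytes = 2;
	long mask = ~(long)(op_bytes * 8 - 1);	/* ~15 */
	long sv = (short)bit & (short)mask;	/* 32: bit offset rounded down to a word boundary */
	long ea_delta = sv >> 3;		/* advance ea by 4 bytes */
	int subword = bit & (op_bytes * 8 - 1);	/* test bit 3 of that word */

So the access lands on bit 3 of the word 4 bytes past m, and only addr.mem.ea needs adjusting; the segment is untouched.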
case OP_MEM:
if (c->lock_prefix)
rc = ops->cmpxchg_emulated(
- c->dst.addr.mem,
+ linear(ctxt, c->dst.addr.mem),
&c->dst.orig_val,
&c->dst.val,
c->dst.bytes,
ctxt->vcpu);
else
rc = ops->write_emulated(
- c->dst.addr.mem,
+ linear(ctxt, c->dst.addr.mem),
&c->dst.val,
c->dst.bytes,
&err,
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
- c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
- c->regs[VCPU_REGS_RSP]);
+ c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ c->dst.addr.mem.seg = VCPU_SREG_SS;
}
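
The push path records the segment and defers linearization to writeback; emulate_pop below has to linearize eagerly because it reads at once. The two-step pattern, condensed from the hunks in this patch:

	/* decode/emulate time: remember the location segment-relatively */
	c->dst.addr.mem.ea  = register_address(c, c->regs[VCPU_REGS_RSP]);
	c->dst.addr.mem.seg = VCPU_SREG_SS;
	/* access time: only now fold in the segment base */
	rc = ops->write_emulated(linear(ctxt, c->dst.addr.mem),
				 &c->dst.val, c->dst.bytes, &err, ctxt->vcpu);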
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
{
struct decode_cache *c = &ctxt->decode;
int rc;
+ struct segmented_address addr;
- rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
- c->regs[VCPU_REGS_RSP]),
- dest, len);
+ addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ addr.seg = VCPU_SREG_SS;
+ rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
int reg, struct operand *op)
{
struct decode_cache *c = &ctxt->decode;
int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
register_address_increment(c, &c->regs[reg], df * op->bytes);
- op->addr.mem = register_address(c, base, c->regs[reg]);
+ op->addr.mem.ea = register_address(c, c->regs[reg]);
+ op->addr.mem.seg = seg;
}
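
string_addr_inc() now tags the operand with a segment instead of pre-adding a base; the call sites near the end of this patch show the intended pairing for string instructions:

	/* the source honours the (defaulted) segment override; the
	 * destination is architecturally always ES:RDI */
	string_addr_inc(ctxt, seg_override(ctxt, ops, c),
			VCPU_REGS_RSI, &c->src);
	string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI, &c->dst);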
static int em_push(struct x86_emulate_ctxt *ctxt)
op->type = OP_IMM;
op->bytes = size;
- op->addr.mem = c->eip;
+ op->addr.mem.ea = c->eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
if (!c->has_seg_override)
set_seg_override(c, VCPU_SREG_DS);
- if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
- memop.addr.mem += seg_override_base(ctxt, ops, c);
+ memop.addr.mem.seg = seg_override(ctxt, ops, c);
if (memop.type == OP_MEM && c->ad_bytes != 8)
- memop.addr.mem = (u32)memop.addr.mem;
+ memop.addr.mem.ea = (u32)memop.addr.mem.ea;
if (memop.type == OP_MEM && c->rip_relative)
- memop.addr.mem += c->eip;
+ memop.addr.mem.ea += c->eip;
/*
* Decode and fetch the source operand: register, memory
case SrcSI:
c->src.type = OP_MEM;
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.addr.mem =
- register_address(c, seg_override_base(ctxt, ops, c),
- c->regs[VCPU_REGS_RSI]);
+ c->src.addr.mem.ea =
+ register_address(c, c->regs[VCPU_REGS_RSI]);
+ c->src.addr.mem.seg = seg_override(ctxt, ops, c);
c->src.val = 0;
break;
case SrcImmFAddr:
c->src.type = OP_IMM;
- c->src.addr.mem = c->eip;
+ c->src.addr.mem.ea = c->eip;
c->src.bytes = c->op_bytes + 2;
insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
break;
break;
case DstImmUByte:
c->dst.type = OP_IMM;
- c->dst.addr.mem = c->eip;
+ c->dst.addr.mem.ea = c->eip;
c->dst.bytes = 1;
c->dst.val = insn_fetch(u8, 1, c->eip);
break;
case DstDI:
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.addr.mem =
- register_address(c, es_base(ctxt, ops),
- c->regs[VCPU_REGS_RDI]);
+ c->dst.addr.mem.ea =
+ register_address(c, c->regs[VCPU_REGS_RDI]);
+ c->dst.addr.mem.seg = VCPU_SREG_ES;
c->dst.val = 0;
break;
case ImplicitOps:
}
if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
- rc = read_emulated(ctxt, ops, c->src.addr.mem,
+ rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
c->src.valptr, c->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (c->src2.type == OP_MEM) {
- rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+ rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
&c->src2.val, c->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
- rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+ rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
&c->dst.val, c->dst.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
break;
case 0x8d: /* lea r16/r32, m */
- c->dst.val = c->src.addr.mem;
+ c->dst.val = c->src.addr.mem.ea;
break;
case 0x8e: { /* mov seg, r/m16 */
uint16_t sel;
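
The lea case is the reason the old code excluded opcode 0x8d when folding in the override base: lea must see the raw effective address. With segmented operands that falls out naturally, since only the ea half is consumed:

	/* lea r16/r32, m: the result is the intra-segment offset;
	 * addr.mem.seg is simply never read here */
	c->dst.val = c->src.addr.mem.ea;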
c->dst.type = saved_dst_type;
if ((c->d & SrcMask) == SrcSI)
- string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+ string_addr_inc(ctxt, seg_override(ctxt, ops, c),
VCPU_REGS_RSI, &c->src);
if ((c->d & DstMask) == DstDI)
- string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+ string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
&c->dst);
if (c->rep_prefix && (c->d & String)) {
emulate_ud(ctxt);
goto done;
case 7: /* invlpg*/
- emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+ emulate_invlpg(ctxt->vcpu,
+ linear(ctxt, c->src.addr.mem));
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
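
invlpg operates on linear addresses (that is what the TLB is tagged with), so the operand has to be flattened before it reaches emulate_invlpg(); spelled out with a local for clarity:

	/* equivalent to the call above: apply the segment base first,
	 * then invalidate the resulting linear address */
	unsigned long la = linear(ctxt, c->src.addr.mem);
	emulate_invlpg(ctxt->vcpu, la);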