Mirror of https://github.com/torvalds/linux.git (synced 2024-11-16 17:12:06 +00:00)
KVM: x86 emulator: preserve an operand's segment identity
Currently the x86 emulator converts the segment register associated with an operand into a segment base, which is added into the operand address. This loss of information results in us not doing segment limit checks properly.

Replace struct operand's addr.mem field by a segmented_address structure which holds both the effective address and the segment. This will allow us to do the limit check at the point of access.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in: parent d53db5efc2, commit 90de84f50b
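For orientation, here is a minimal, self-contained sketch of the idea behind the patch; it is not kernel code. The segmented_address fields (ea, seg) mirror the structure the diff below adds, while seg_base_of(), seg_limit_of() and linearize() are hypothetical stand-ins for the emulator's real helpers: the patch itself only adds a linear() helper that applies the segment base at access time, and the limit check it enables is left for later work.

#include <stdio.h>

/* Toy model of the patch's idea: keep (segment, effective address)
 * together instead of folding the segment base in at decode time. */
struct segmented_address {
	unsigned long ea;   /* effective address within the segment */
	unsigned int  seg;  /* segment register index (e.g. SS, ES, DS) */
};

/* Hypothetical per-segment state; the real emulator reads this from
 * the cached segment descriptors via its ops callbacks. */
static unsigned long seg_base_of(unsigned int seg)
{
	return seg * 0x10000ul;
}

static unsigned long seg_limit_of(unsigned int seg)
{
	(void)seg;          /* toy model: every segment has a 64 KiB limit */
	return 0xfffful;
}

/* Linearize at the point of access, where a limit check becomes possible. */
static int linearize(struct segmented_address addr, unsigned long *la)
{
	if (addr.ea > seg_limit_of(addr.seg))
		return -1;                      /* would raise #GP or #SS */
	*la = seg_base_of(addr.seg) + addr.ea;
	return 0;
}

int main(void)
{
	struct segmented_address addr = { .ea = 0x1234, .seg = 2 };
	unsigned long la;

	if (linearize(addr, &la) == 0)
		printf("linear address: %#lx\n", la);
	return 0;
}

At each access site the actual diff simply wraps the stored segmented address in linear(), as the hunks below show.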
@@ -159,7 +159,10 @@ struct operand {
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
@@ -410,9 +410,9 @@ address_mask(struct decode_cache *c, unsigned long reg)
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -444,26 +444,26 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops,
			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
-}
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
-{
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
 }
 
 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
@@ -556,7 +556,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs,
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -564,10 +564,12 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, NULL);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr) + 2, address, op_bytes,
+			   ctxt->vcpu, NULL);
 	return rc;
 }
 
@@ -760,7 +762,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
done:
 	return rc;
 }
@@ -775,13 +777,13 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
done:
@@ -800,7 +802,7 @@ static void fetch_bit_operand(struct decode_cache *c)
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -1093,7 +1095,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
@@ -1101,7 +1103,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
 					&err,
@@ -1129,8 +1131,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1139,10 +1141,11 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -2223,14 +2226,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c, base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2639,7 +2643,7 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2821,14 +2825,13 @@ done_prefixes:
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2880,14 +2883,14 @@ done_prefixes:
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c, seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c),
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2934,7 +2937,7 @@ done_prefixes:
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2959,9 +2962,9 @@ done_prefixes:
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3040,7 +3043,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 				   c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3048,7 +3051,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 				   &c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3060,7 +3063,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3211,7 +3214,7 @@ special_insn:
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3438,11 +3441,11 @@ writeback:
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3535,7 +3538,8 @@ twobyte_insn:
 			emulate_ud(ctxt);
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;