This patch adds segment limit checks to the x86 emulator, along with some
helper functions, and changes the return value of emulate_push() to
accommodate the new checks.

----
Changes from v1:
- Added seg_override_limit() and emulate_ss() helpers
- Corrected limit check return values for long mode
- Limit checking is now consistent with the Intel documentation

Signed-off-by: Mohammed Gamal <m.gamal005@xxxxxxxxx>
---
 arch/x86/kvm/emulate.c |  135 ++++++++++++++++++++++++++++++++++++++++--------
 1 files changed, 113 insertions(+), 22 deletions(-)
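Not part of the patch -- just a sketch, for review, of the shape of the check
that each hunk below adds before an emulated memory access. The helper name
seg_limit_exceeded() is made up for illustration; the patch open-codes this
comparison at every call site and then injects #GP (or #SS for stack accesses)
before returning X86EMUL_PROPAGATE_FAULT:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: "offset" is the address with the segment base already
 * subtracted, "size" is the access width in bytes, and "limit" is the cached
 * segment limit (the new seg_limit() helper returns -1, i.e. effectively no
 * limit, in long mode).
 */
static bool seg_limit_exceeded(unsigned long offset, unsigned int size,
                               uint32_t limit)
{
        /* the same comparison the patch applies before each access */
        return offset > limit - size - 1;
}

Callers of emulate_push() and emulate_push_sreg() now check the returned value
and bail out (goto done, or return the error) on X86EMUL_PROPAGATE_FAULT
instead of assuming the push succeeded.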
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6bb7c68..f415d4b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -633,6 +633,15 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
+static u32 seg_limit(struct x86_emulate_ctxt *ctxt,
+		     struct x86_emulate_ops *ops, int seg)
+{
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return -1;
+
+	return ops->get_cached_segment_limit(seg, ctxt->vcpu);
+}
+
 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
 				       struct x86_emulate_ops *ops,
 				       struct decode_cache *c)
@@ -643,6 +652,16 @@ static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
 	return seg_base(ctxt, ops, c->seg_override);
 }
 
+static u32 seg_override_limit(struct x86_emulate_ctxt *ctxt,
+			      struct x86_emulate_ops *ops,
+			      struct decode_cache *c)
+{
+	if (!c->has_seg_override)
+		return -1;
+
+	return seg_limit(ctxt, ops, c->seg_override);
+}
+
 static unsigned long cs_base(struct x86_emulate_ctxt *ctxt,
 			     struct x86_emulate_ops *ops)
 {
@@ -661,6 +680,24 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
 	return seg_base(ctxt, ops, VCPU_SREG_SS);
 }
 
+static u32 cs_limit(struct x86_emulate_ctxt *ctxt,
+		    struct x86_emulate_ops *ops)
+{
+	return seg_limit(ctxt, ops, VCPU_SREG_CS);
+}
+
+static u32 es_limit(struct x86_emulate_ctxt *ctxt,
+		    struct x86_emulate_ops *ops)
+{
+	return seg_limit(ctxt, ops, VCPU_SREG_ES);
+}
+
+static u32 ss_limit(struct x86_emulate_ctxt *ctxt,
+		    struct x86_emulate_ops *ops)
+{
+	return seg_limit(ctxt, ops, VCPU_SREG_SS);
+}
+
 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 			      u32 error, bool valid)
 {
@@ -675,6 +712,11 @@ static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 	emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
+static void emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
+{
+	emulate_exception(ctxt, SS_VECTOR, err, true);
+}
+
 static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
 		       int err)
 {
@@ -719,6 +761,12 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
 {
 	int rc;
 
+	/* eip is already relative to CS, so we just check it against the limit */
+	if (eip > cs_limit(ctxt, ops) - size - 1) {
+		emulate_gp(ctxt, 0);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
 	/* x86 instructions are limited to 15 bytes. */
 	if (eip + size - ctxt->eip > 15)
 		return X86EMUL_UNHANDLEABLE;
@@ -1222,6 +1270,11 @@ done_prefixes:
 		c->src.ptr = (unsigned long *)
 			register_address(c, seg_override_base(ctxt, ops, c),
 					 c->regs[VCPU_REGS_RSI]);
+		if ((unsigned long)c->src.ptr - seg_override_base(ctxt, ops, c) >
+		    seg_override_limit(ctxt, ops, c) - c->src.bytes - 1) {
+			emulate_gp(ctxt, 0);
+			return X86EMUL_PROPAGATE_FAULT;
+		}
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
@@ -1318,6 +1371,12 @@ done_prefixes:
 		c->dst.ptr = (unsigned long *)
 			register_address(c, es_base(ctxt, ops),
 					 c->regs[VCPU_REGS_RDI]);
+
+		if ((unsigned long)c->dst.ptr - es_base(ctxt, ops) >
+		    es_limit(ctxt, ops) - c->dst.bytes - 1) {
+			emulate_gp(ctxt, 0);
+			return X86EMUL_PROPAGATE_FAULT;
+		}
 		c->dst.val = 0;
 		break;
 	}
@@ -1637,7 +1696,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }
 
-static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
+static inline int emulate_push(struct x86_emulate_ctxt *ctxt,
 			       struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -1648,6 +1707,13 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
 	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
 					       c->regs[VCPU_REGS_RSP]);
+	if ((unsigned long) c->dst.ptr - ss_base(ctxt, ops) >
+	    ss_limit(ctxt, ops) - c->dst.bytes - 1) {
+		emulate_ss(ctxt, 0);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	return X86EMUL_CONTINUE;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1655,11 +1721,15 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 		       void *dest, int len)
 {
 	struct decode_cache *c = &ctxt->decode;
+	unsigned long reg_addr = register_address(c, ss_base(ctxt, ops), c->regs[VCPU_REGS_RSP]);
 	int rc;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	if (reg_addr - ss_base(ctxt, ops) > ss_limit(ctxt, ops) - c->src.bytes - 1) {
+		emulate_ss(ctxt, 0);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	rc = read_emulated(ctxt, ops, reg_addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1710,14 +1780,14 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
 			      struct x86_emulate_ops *ops, int seg)
 {
 	struct decode_cache *c = &ctxt->decode;
 
 	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
 
-	emulate_push(ctxt, ops);
+	return emulate_push(ctxt, ops);
 }
 
 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
@@ -1747,7 +1817,10 @@ static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
 		(reg == VCPU_REGS_RSP) ?
 		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);
 
-		emulate_push(ctxt, ops);
+		rc = emulate_push(ctxt, ops);
+
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
 
 		rc = writeback(ctxt, ops);
 		if (rc != X86EMUL_CONTINUE)
@@ -1859,15 +1932,13 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
 		old_eip = c->eip;
 		c->eip = c->src.val;
 		c->src.val = old_eip;
-		emulate_push(ctxt, ops);
-		break;
+		return emulate_push(ctxt, ops);
 	}
 	case 4: /* jmp abs */
 		c->eip = c->src.val;
 		break;
 	case 6:	/* push */
-		emulate_push(ctxt, ops);
-		break;
+		return emulate_push(ctxt, ops);
 	}
 	return X86EMUL_CONTINUE;
 }
@@ -2523,7 +2594,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
 		c->lock_prefix = 0;
 		c->src.val = (unsigned long) error_code;
-		emulate_push(ctxt, ops);
+		ret = emulate_push(ctxt, ops);
 	}
 
 	return ret;
@@ -2656,7 +2727,9 @@ special_insn:
 		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x06:		/* push es */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x07:		/* pop es */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
@@ -2668,14 +2741,18 @@ special_insn:
 		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x0e:		/* push cs */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x10 ... 0x15:
 	      adc:		/* adc */
 		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x16:		/* push ss */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x17:		/* pop ss */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
@@ -2687,7 +2764,9 @@ special_insn:
 		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x1e:		/* push ds */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x1f:		/* pop ds */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
@@ -2717,7 +2796,9 @@ special_insn:
 		emulate_1op("dec", c->dst, ctxt->eflags);
 		break;
 	case 0x50 ... 0x57:  /* push reg */
-		emulate_push(ctxt, ops);
+		rc = emulate_push(ctxt, ops);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x58 ... 0x5f: /* pop reg */
 	pop_instruction:
@@ -2742,7 +2823,9 @@ special_insn:
 		break;
 	case 0x68: /* push imm */
 	case 0x6a: /* push imm8 */
-		emulate_push(ctxt, ops);
+		rc = emulate_push(ctxt, ops);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x6c:		/* insb */
 	case 0x6d:		/* insw/insd */
@@ -2870,7 +2953,9 @@ special_insn:
 		goto xchg;
 	case 0x9c: /* pushf */
 		c->src.val = (unsigned long) ctxt->eflags;
-		emulate_push(ctxt, ops);
+		rc = emulate_push(ctxt, ops);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0x9d: /* popf */
 		c->dst.type = OP_REG;
@@ -2934,7 +3019,9 @@ special_insn:
 		long int rel = c->src.val;
 		c->src.val = (unsigned long) c->eip;
 		jmp_rel(c, rel);
-		emulate_push(ctxt, ops);
+		rc = emulate_push(ctxt, ops);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	}
 	case 0xe9: /* jmp rel */
@@ -3261,7 +3348,9 @@ twobyte_insn:
 		c->dst.type = OP_NONE;
 		break;
 	case 0xa0:	  /* push fs */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
	case 0xa1:	 /* pop fs */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
@@ -3280,7 +3369,9 @@ twobyte_insn:
 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
 		break;
 	case 0xa8:	/* push gs */
-		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
 		break;
 	case 0xa9:	/* pop gs */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
--
1.7.0.4