The function will be used outside of the emulator.

Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_emulate.h |   14 ++++--
 arch/x86/kvm/emulate.c             |   84 ++------------------------------
 arch/x86/kvm/x86.c                 |   92 ++++++++++++++++++++++++++++++++++--
 3 files changed, 103 insertions(+), 87 deletions(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c276ca..bdf8a84 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -43,6 +43,11 @@ struct x86_instruction_info {
 	u64 next_rip;               /* rip following the instruction      */
 };
 
+struct segmented_address {
+	ulong ea;
+	unsigned seg;
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -194,6 +199,10 @@ struct x86_emulate_ops {
 
 	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
 			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+
+	int (*linearize)(struct x86_emulate_ctxt *ctxt,
+			 struct segmented_address addr, unsigned size,
+			 bool write, bool fetch, ulong *linear);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -208,10 +217,7 @@ struct operand {
 	};
 	union {
 		unsigned long *reg;
-		struct segmented_address {
-			ulong ea;
-			unsigned seg;
-		} mem;
+		struct segmented_address mem;
 		unsigned xmm;
 		unsigned mm;
 	} addr;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e317588..24b1c70 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -470,14 +470,6 @@ static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
 	ctxt->seg_override = seg;
 }
 
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
-{
-	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
-		return 0;
-
-	return ctxt->ops->get_cached_segment_base(ctxt, seg);
-}
-
 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
 {
 	if (!ctxt->has_seg_override)
@@ -505,11 +497,6 @@ static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
-{
-	return emulate_exception(ctxt, SS_VECTOR, err, true);
-}
-
 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
@@ -578,74 +565,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write, bool fetch,
 		     ulong *linear)
 {
-	struct desc_struct desc;
-	bool usable;
-	ulong la;
-	u32 lim;
-	u16 sel;
-	unsigned cpl, rpl;
+	int err = ctxt->ops->linearize(ctxt, addr, size, write, fetch, linear);
 
-	la = seg_base(ctxt, addr.seg) + addr.ea;
-	switch (ctxt->mode) {
-	case X86EMUL_MODE_REAL:
-		break;
-	case X86EMUL_MODE_PROT64:
-		if (((signed long)la << 16) >> 16 != la)
-			return emulate_gp(ctxt, 0);
-		break;
-	default:
-		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
-						addr.seg);
-		if (!usable)
-			goto bad;
-		/* code segment or read-only data segment */
-		if (((desc.type & 8) || !(desc.type & 2)) && write)
-			goto bad;
-		/* unreadable code segment */
-		if (!fetch && (desc.type & 8) && !(desc.type & 2))
-			goto bad;
-		lim = desc_limit_scaled(&desc);
-		if ((desc.type & 8) || !(desc.type & 4)) {
-			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
-				goto bad;
-		} else {
-			/* exapand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
-				goto bad;
-			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
-				goto bad;
-		}
-		cpl = ctxt->ops->cpl(ctxt);
-		rpl = sel & 3;
-		cpl = max(cpl, rpl);
-		if (!(desc.type & 8)) {
-			/* data segment */
-			if (cpl > desc.dpl)
-				goto bad;
-		} else if ((desc.type & 8) && !(desc.type & 4)) {
-			/* nonconforming code segment */
-			if (cpl != desc.dpl)
-				goto bad;
-		} else if ((desc.type & 8) && (desc.type & 4)) {
-			/* conforming code segment */
-			if (cpl < desc.dpl)
-				goto bad;
-		}
-		break;
-	}
-	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
-		la &= (u32)-1;
-	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+	if (err >= 0)
+		return emulate_exception(ctxt, err, (int)*linear, true);
+
+	if (insn_aligned(ctxt, size) && ((*linear & (size - 1)) != 0))
 		return emulate_gp(ctxt, 0);
-	*linear = la;
+
 	return X86EMUL_CONTINUE;
-bad:
-	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, addr.seg);
-	else
-		return emulate_gp(ctxt, addr.seg);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6fa0e21..5ce24a8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3656,6 +3656,84 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
+static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	return kvm_x86_ops->get_segment_base(vcpu, seg);
+}
+
+static int kvm_linearize_address(struct kvm_vcpu *vcpu, enum x86emul_mode mode,
+	ulong ea, unsigned seg, unsigned size, bool write, bool fetch,
+	u8 ad_bytes, ulong *linear)
+{
+	ulong la;
+	unsigned cpl, rpl;
+	struct kvm_segment desc;
+
+	la = get_segment_base(vcpu, seg) + ea;
+	switch (mode) {
+	case X86EMUL_MODE_REAL:
+		break;
+	case X86EMUL_MODE_PROT64:
+		if (((signed long)la << 16) >> 16 != la) {
+			*linear = 0;
+			return GP_VECTOR;
+		}
+		break;
+	default:
+		kvm_get_segment(vcpu, &desc, seg);
+		if (desc.unusable)
+			goto bad;
+		/* code segment or read-only data segment */
+		if (((desc.type & 8) || !(desc.type & 2)) && write)
+			goto bad;
+		/* unreadable code segment */
+		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+			goto bad;
+		if ((desc.type & 8) || !(desc.type & 4)) {
+			/* expand-up segment */
+			if (ea > desc.limit ||
+			    (u32)(ea + size - 1) > desc.limit)
+				goto bad;
+		} else {
+			u32 lim;
+			/* expand-down segment */
+			if (ea <= desc.limit ||
+			    (u32)(ea + size - 1) <= desc.limit)
+				goto bad;
+			lim = desc.db ? 0xffffffff : 0xffff;
+			if (ea > lim || (u32)(ea + size - 1) > lim)
+				goto bad;
+		}
+		cpl = kvm_x86_ops->get_cpl(vcpu);
+		rpl = desc.selector & 3;
+		cpl = max(cpl, rpl);
+		if (!(desc.type & 8)) {
+			/* data segment */
+			if (cpl > desc.dpl)
+				goto bad;
+		} else if ((desc.type & 8) && !(desc.type & 4)) {
+			/* nonconforming code segment */
+			if (cpl != desc.dpl)
+				goto bad;
+		} else if ((desc.type & 8) && (desc.type & 4)) {
+			/* conforming code segment */
+			if (cpl < desc.dpl)
+				goto bad;
+		}
+		break;
+	}
+	if (fetch ? mode != X86EMUL_MODE_PROT64 : ad_bytes != 8)
+		la &= (u32)-1;
+	*linear = la;
+	return -1;
+bad:
+	*linear = (ulong)seg;
+	if (seg == VCPU_SREG_SS)
+		return SS_VECTOR;
+	else
+		return GP_VECTOR;
+}
+
 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 			       gpa_t *gpa, struct x86_exception *exception,
 			       bool write)
@@ -4044,11 +4122,6 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
 }
 
-static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
-	return kvm_x86_ops->get_segment_base(vcpu, seg);
-}
-
 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 {
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
@@ -4319,6 +4392,14 @@ static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
 	return false;
 }
 
+static int emulator_linearize_address(struct x86_emulate_ctxt *ctxt,
+		struct segmented_address addr, unsigned size,
+		bool write, bool fetch, ulong *linear)
+{
+	return kvm_linearize_address(emul_to_vcpu(ctxt), ctxt->mode, addr.ea,
+		addr.seg, size, write, fetch, ctxt->ad_bytes, linear);
+}
+
 static struct x86_emulate_ops emulate_ops = {
 	.read_std            = kvm_read_guest_virt_system,
 	.write_std           = kvm_write_guest_virt_system,
@@ -4352,6 +4433,7 @@ static struct x86_emulate_ops emulate_ops = {
 	.put_fpu             = emulator_put_fpu,
 	.intercept           = emulator_intercept,
 	.get_cpuid           = emulator_get_cpuid,
+	.linearize           = emulator_linearize_address,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
-- 
1.7.7.3
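
To illustrate the intended use outside of the emulator, here is a minimal
sketch of a hypothetical caller inside x86.c. It is not part of the patch:
the example_linearize_ss() name, the hard-coded X86EMUL_MODE_PROT32 mode and
4-byte address size, and the use of kvm_queue_exception_e() to deliver the
reported fault are all illustrative assumptions.

/*
 * Illustrative sketch only, not part of this patch: resolve SS:offset to a
 * linear address with the relocated helper and inject whatever fault it
 * reports.  Mode, address size and delivery via kvm_queue_exception_e()
 * are assumptions made for the example.
 */
static int example_linearize_ss(struct kvm_vcpu *vcpu, ulong offset,
				unsigned size, ulong *linear)
{
	/* kvm_linearize_address() returns a vector on failure, -1 on success. */
	int vec = kvm_linearize_address(vcpu, X86EMUL_MODE_PROT32, offset,
					VCPU_SREG_SS, size,
					false /* write */, false /* fetch */,
					4 /* ad_bytes */, linear);

	if (vec >= 0) {
		/* Segment check failed: inject #SS or #GP as chosen by the helper. */
		kvm_queue_exception_e(vcpu, vec, 0);
		return 1;
	}

	return 0;
}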