Consolidate the two bool parameters (write/fetch) of __linearize() into a
'u32 flags' parameter to make the function more concise and easier to
extend, e.g. to support Intel Linear Address Masking (LAM), which allows
high non-address bits of a linear address to be used as metadata.

Define two flags to replace the two bools. A new flag will be added
later for LAM, to skip masking off the metadata bits of a linear
address under certain conditions.

No functional change intended.

Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Reviewed-by: Chao Gao <chao.gao@xxxxxxxxx>
Acked-by: Kai Huang <kai.huang@xxxxxxxxx>
---
 arch/x86/kvm/emulate.c     | 19 ++++++++++---------
 arch/x86/kvm/kvm_emulate.h |  4 ++++
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 936a397a08cd..e89afc39e56f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
-				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       u32 flags)
 {
	struct desc_struct desc;
	bool usable;
@@ -718,10 +718,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
-					|| !(desc.type & 2)) && write)
+					|| !(desc.type & 2)) && (flags & X86EMUL_F_WRITE))
			goto bad;
		/* unreadable code segment */
-		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
@@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
		     ulong *linear)
 {
	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
+			   write ? X86EMUL_F_WRITE : 0);
 }

 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
@@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..5b9ec610b2cb 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,10 @@ struct x86_instruction_info {
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
 #define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */

+/* x86-specific emulation flags */
+#define X86EMUL_F_FETCH			BIT(0)
+#define X86EMUL_F_WRITE			BIT(1)
+
 struct x86_emulate_ops {
	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
	/*
--
2.25.1
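
P.S. for readers less familiar with the pattern: the point of the 'u32
flags' parameter is that new behaviors compose at the call sites without
another round of signature churn across every caller. Below is a minimal
standalone sketch in plain C (not KVM code) of the bool-to-flags
consolidation. X86EMUL_F_FETCH/X86EMUL_F_WRITE mirror the patch;
X86EMUL_F_SKIP_UNTAG and the untag() helper are hypothetical stand-ins
for what a follow-up LAM patch might add.

#include <stdint.h>
#include <stdio.h>

#define X86EMUL_F_FETCH      (1u << 0)
#define X86EMUL_F_WRITE      (1u << 1)
/* Hypothetical future flag: leave LAM metadata bits in place. */
#define X86EMUL_F_SKIP_UNTAG (1u << 2)

/* Illustrative LAM48-style untag: sign-extend from bit 47. */
static uint64_t untag(uint64_t la)
{
	return (uint64_t)((int64_t)(la << 16) >> 16);
}

/* Toy stand-in for __linearize(): checks keyed off flags, not bools. */
static uint64_t linearize(uint64_t ea, uint32_t flags)
{
	if (flags & X86EMUL_F_WRITE)
		printf("  would reject read-only/code segments\n");
	if (!(flags & X86EMUL_F_FETCH))
		printf("  would reject unreadable code segments\n");
	if (!(flags & X86EMUL_F_SKIP_UNTAG))
		ea = untag(ea);
	return ea;
}

int main(void)
{
	uint64_t tagged = 0x00ff000000001000ULL; /* metadata in bits 55:48 */

	/* Formerly linearize(..., write=true, fetch=false): */
	printf("write -> 0x%llx\n",
	       (unsigned long long)linearize(tagged, X86EMUL_F_WRITE));

	/* A new behavior composes without changing the signature: */
	printf("fetch|skip-untag -> 0x%llx\n",
	       (unsigned long long)linearize(tagged, X86EMUL_F_FETCH |
						     X86EMUL_F_SKIP_UNTAG));
	return 0;
}

With the two-bool signature, adding a third behavior would have meant
touching every caller; with flags, only the call sites that care pass
the new bit.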