On 4/19/2023 2:43 PM, Chao Gao wrote:
On Tue, Apr 04, 2023 at 09:09:22PM +0800, Binbin Wu wrote:
Untag addresses for 64-bit memory/MMIO operands in instruction emulation
and VM-exit handlers when LAM is applicable.

For instruction emulation, untag the address in __linearize() before the
canonical check (a short sketch of the untag operation follows the case
list below). LAM doesn't apply to instruction fetches or to the operand
of invlpg, so use KVM_X86_UNTAG_ADDR_SKIP_LAM to skip LAM untagging for
them.
For VM-exit handlers that operate on a 64-bit linear address:
- Cases that need untagging:
  Operand(s) of VMX instructions and INVPCID
  Operand(s) of SGX ENCLS
  The linear address in the INVVPID descriptor
- Cases LAM doesn't apply to (no change needed):
  The operand of INVLPG
  The linear address in the INVPCID descriptor
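
For reference, the untag operation itself boils down to a sign-extension
of the linear address from the LAM metadata boundary, with bit 63
preserved. A minimal sketch, assuming LAM48/LAM57 semantics (helper name
is illustrative, not the exact code in this series):

#include <linux/bits.h>
#include <linux/bitops.h>

/*
 * Sketch only: LAM57 keeps metadata in bits 62:57 and LAM48 in
 * bits 62:48, so untagging sign-extends from bit 56 or bit 47
 * while leaving bit 63 untouched.
 */
static u64 lam_untag_addr(u64 addr, bool lam57)
{
	int lam_bit = lam57 ? 56 : 47;

	return (sign_extend64(addr, lam_bit) & ~BIT_ULL(63)) |
	       (addr & BIT_ULL(63));
}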
Co-developed-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Tested-by: Xuelian Guo <xuelian.guo@xxxxxxxxx>
---
arch/x86/kvm/emulate.c | 23 ++++++++++++++++++-----
arch/x86/kvm/kvm_emulate.h | 2 ++
arch/x86/kvm/vmx/nested.c | 4 ++++
arch/x86/kvm/vmx/sgx.c | 1 +
arch/x86/kvm/x86.c | 10 ++++++++++
5 files changed, 35 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a20bec931764..b7df465eccf2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -688,7 +688,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
 				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       u64 untag_flags)
@write and @fetch are effectively flags. I think we can consolidate them
into a single @flags parameter first, as a cleanup patch, and then add a
flag for LAM.
OK. Here is the proposed cleanup patch:
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
-				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       u64 flags, enum x86emul_mode mode,
+				       ulong *linear)
 {
 	struct desc_struct desc;
 	bool usable;
@@ -696,6 +696,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	u32 lim;
 	u16 sel;
 	u8  va_bits;
+	bool fetch = !!(flags & KVM_X86_EMULFLAG_FETCH);
+	bool write = !!(flags & KVM_X86_EMULFLAG_WRITE);
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
@@ -757,7 +759,12 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     ulong *linear)
 {
 	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false,
+	u64 flags = 0;
+
+	if (write)
+		flags |= KVM_X86_EMULFLAG_WRITE;
+
+	return __linearize(ctxt, addr, &max_size, size, flags,
 			   ctxt->mode, linear);
 }
@@ -768,10 +775,11 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 	unsigned max_size;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = dst };
+	u64 flags = KVM_X86_EMULFLAG_FETCH;
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
-			 &linear);
+	rc = __linearize(ctxt, addr, &max_size, 1, flags, ctxt->mode,
+			 &linear);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -896,6 +904,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
+	u64 flags = KVM_X86_EMULFLAG_FETCH;
 
 	/*
 	 * We do not know exactly how many bytes will be needed, and
@@ -907,8 +916,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * boundary check itself.  Instead, we use max_size to check
 	 * against op_size.
 	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, flags, ctxt->mode, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a8167b47b8c8..8076e013ff9f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -48,6 +48,15 @@ void kvm_spurious_fault(void);
 #define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
 #define KVM_SVM_DEFAULT_PLE_WINDOW	3000
 
+/* x86-specific emulation flags */
+#define KVM_X86_EMULFLAG_FETCH	_BITULL(0)
+#define KVM_X86_EMULFLAG_WRITE	_BITULL(1)
And the following two will be defined for untag:

#define KVM_X86_EMULFLAG_SKIP_UNTAG_VMX	_BITULL(2)
#define KVM_X86_EMULFLAG_SKIP_UNTAG_SVM	_BITULL(3)	/* reserved for SVM */
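
__linearize() would then consume the skip flags ahead of its canonical
check, roughly along these lines (the ->untag_addr() callback name is
hypothetical here, sketch only):

	/*
	 * Sketch: in __linearize(), before the canonical check.  The
	 * vendor hook applies LAM untagging unless the caller passed
	 * a matching skip flag, e.g. invlpg emulation passing
	 * KVM_X86_EMULFLAG_SKIP_UNTAG_VMX.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		la = ctxt->ops->untag_addr(ctxt, la, flags);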