Re: [PATCH v6 6/7] KVM: x86: Untag address when LAM applicable

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 3/20/2023 7:51 PM, Chao Gao wrote:
On Sun, Mar 19, 2023 at 04:49:26PM +0800, Binbin Wu wrote:
Untag addresses for 64-bit memory/MMIO operands in instruction emulation
and VM-exit handlers when LAM is applicable.

For instruction emulation, untag the address in __linearize() before the
canonical check. LAM doesn't apply to instruction fetches or INVLPG;
use KVM_X86_UNTAG_ADDR_SKIP_LAM to skip LAM untagging for them.

For VM-exit handling related to 64-bit linear addresses:
- Cases that need address untagging:
  Operand(s) of VMX instructions and INVPCID
  Operand(s) of SGX ENCLS
  Linear address in the INVVPID descriptor
- Cases LAM doesn't apply to (no change needed):
  Operand of INVLPG
  Linear address in the INVPCID descriptor

Co-developed-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
---
arch/x86/kvm/emulate.c    | 25 +++++++++++++++++--------
arch/x86/kvm/vmx/nested.c |  2 ++
arch/x86/kvm/vmx/sgx.c    |  1 +
arch/x86/kvm/x86.c        |  4 ++++
4 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a630c5db971c..c46f0162498e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -688,7 +688,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       u64 untag_flags)
{
	struct desc_struct desc;
	bool usable;
@@ -701,9 +702,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
-		*linear = la;
+		*linear = static_call(kvm_x86_untag_addr)(ctxt->vcpu, la, untag_flags);
+
		va_bits = ctxt_virt_addr_bits(ctxt);
-		if (!__is_canonical_address(la, va_bits))
+		if (!__is_canonical_address(*linear, va_bits))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
@@ -757,8 +759,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
		     ulong *linear)
{
	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+	return __linearize(ctxt, addr, &max_size, size, false, false,
							^^^^^
							Should be "write".

Oops, thanks for catching it.



+			   ctxt->mode, linear, 0);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +773,9 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+	/* skip LAM untag for instruction */
I think it would be more accurate to quote the spec:

LAM does not apply to addresses used for instruction fetches or to those
that specify the targets of jump and call instructions


OK.


+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
+		         &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
@@ -906,9 +910,11 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
+	 *
+	 * skip LAM untag for instruction
ditto



[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux