On 4/18/2023 11:28 AM, Zeng Guang wrote:
On 4/4/2023 9:09 PM, Binbin Wu wrote:
Untag addresses for 64-bit memory/MMIO operands in instruction emulation
and VM-exit handlers when LAM is applicable.

For instruction emulation, untag the address in __linearize() before the
canonical check. LAM doesn't apply to instruction fetches or INVLPG, so
pass KVM_X86_UNTAG_ADDR_SKIP_LAM to skip untagging in those cases.
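For context on what "untag" means here: LAM untagging is a sign-extension
of the linear address from the LAM boundary bit, with bit 63 preserved so
the canonical check that follows can still distinguish the user and
supervisor halves of the address space. A minimal sketch, with the helper
name illustrative and the lam_bit value (47 for LAM48, 56 for LAM57)
assumed to come from the guest's paging configuration:

#include <linux/bitops.h>	/* sign_extend64(), BIT_ULL() */
#include <linux/types.h>	/* u64 */

/*
 * Illustrative sketch only, not the series' exact code: drop the LAM
 * metadata bits (62:48 for LAM48, 62:57 for LAM57) by sign-extending
 * from the LAM boundary bit, while preserving bit 63 so that the
 * canonical check in __linearize() is unaffected.
 */
static u64 lam_untag_address(u64 addr, unsigned int lam_bit)
{
	return (sign_extend64(addr, lam_bit) & ~BIT_ULL(63)) |
	       (addr & BIT_ULL(63));
}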
For VM-exit handling related to 64-bit linear addresses:
- Cases that need address untagging (a sketch of the handler pattern
  follows this list)
    Operand(s) of VMX instructions and INVPCID
    Operand(s) of SGX ENCLS
    Linear address in INVVPID descriptor
- Cases LAM doesn't apply to (no change needed)
    Operand of INVLPG
    Linear address in INVPCID descriptor
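The per-handler change for the untagging cases is small: untag the
computed guest linear address before it is used. A rough sketch, where
the handler name is hypothetical and the kvm_x86_untag_addr hook name is
assumed from this series' flag naming:

/*
 * Illustrative pattern for the affected VM-exit handlers (VMX
 * instructions, INVPCID, SGX ENCLS): compute the guest linear address
 * of the operand, strip LAM metadata bits, then access guest memory.
 */
static int handle_some_vmx_insn(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct x86_exception e;
	u64 operand;
	gva_t gva;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info, false,
				sizeof(operand), &gva))
		return 1;

	/* Strip LAM metadata bits before reading guest memory. */
	gva = static_call(kvm_x86_untag_addr)(vcpu, gva, 0);

	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_emulated_page_fault(vcpu, &e);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}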
Co-developed-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Tested-by: Xuelian Guo <xuelian.guo@xxxxxxxxx>
---
arch/x86/kvm/emulate.c | 23 ++++++++++++++++++-----
arch/x86/kvm/kvm_emulate.h | 2 ++
arch/x86/kvm/vmx/nested.c | 4 ++++
arch/x86/kvm/vmx/sgx.c | 1 +
arch/x86/kvm/x86.c | 10 ++++++++++
5 files changed, 35 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a20bec931764..b7df465eccf2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -688,7 +688,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
 				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       u64 untag_flags)
IMO, this should be "u64 flags" instead of "u64 untag_flags", so that the
emulator can use it as a combination of flags for other purposes too.

Yes, that makes sense, and it fits with the advice you suggested in patch 3.
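To illustrate the point, a generic "flags" parameter lets a caller combine
bits. KVM_X86_UNTAG_ADDR_SKIP_LAM is from this series (its bit value is
assumed here); the second flag below does not exist and is purely
hypothetical:

/* Flag bits for __linearize(); only SKIP_LAM exists in this series. */
#define KVM_X86_UNTAG_ADDR_SKIP_LAM	BIT_ULL(0)
#define KVM_X86_LINEARIZE_SOME_FLAG	BIT_ULL(1)	/* hypothetical */

	/* A hypothetical call site combining two flags: */
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
			 &linear,
			 KVM_X86_UNTAG_ADDR_SKIP_LAM | KVM_X86_LINEARIZE_SOME_FLAG);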
 {
 	struct desc_struct desc;
 	bool usable;
@@ -701,6 +702,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		la = ctxt->ops->untag_addr(ctxt, la, untag_flags);
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
 		if (!__is_canonical_address(la, va_bits))
@@ -758,7 +760,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 {
 	unsigned max_size;
 	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+			   ctxt->mode, linear, 0);
 }

 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +773,12 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+	/*
+	 * LAM does not apply to addresses used for instruction fetches
+	 * or to those that specify the targets of jump and call instructions
+	 */
This API handles the target address of branch and call instructions, so I
think it's enough to explain only that exact case.

OK, will make the comment specific.
+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
+			 &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -906,9 +913,12 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * __linearize is called with size 0 so that it does not do any
 	 * boundary check itself.  Instead, we use max_size to check
 	 * against op_size.
+	 *
+	 * LAM does not apply to addresses used for instruction fetches
+	 * or to those that specify the targets of jump and call instructions
Ditto.
 	 */
 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+			 &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
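As a side note, the x86.c and kvm_emulate.h hunks from the diffstat (the
glue wiring the emulator's new op to the vendor implementation) are not
quoted in this reply. They presumably look something like the sketch
below; the kvm_x86_untag_addr hook name is assumed from this series' flag
naming, so treat this as illustrative rather than the exact code:

/*
 * Illustrative sketch of the x86.c glue implied by the diffstat: the
 * emulator's ->untag_addr() op forwards to the vendor (VMX) hook via
 * kvm_x86_ops.  Hook name and exact signature are assumptions.
 */
static gva_t emulator_untag_addr(struct x86_emulate_ctxt *ctxt,
				 gva_t addr, u64 flags)
{
	return static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr, flags);
}

with a matching .untag_addr = emulator_untag_addr, entry in emulate_ops,
and the op itself presumably being the two-line addition to struct
x86_emulate_ops in kvm_emulate.h.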