When KVM emulation calculates a linear address (LA) for a data access,
apply LAM untagging if the guest has LAM active at that point, so that
the subsequent canonical check can pass.

Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
---
 arch/x86/kvm/emulate.c |  6 ++++++
 arch/x86/kvm/x86.h     | 13 +++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..d52037151133 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -700,6 +700,12 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		/*
+		 * LAM applies only to data accesses.
+		 */
+		if (!fetch && is_lam_active(ctxt->vcpu))
+			la = kvm_untagged_addr(la, ctxt->vcpu);
+
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
 		if (!__is_canonical_address(la, va_bits))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7228895d4a6f..9397e9f4e061 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -135,6 +135,19 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+static inline bool is_lam_active(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57) ||
+	       kvm_read_cr4_bits(vcpu, X86_CR4_LAM_SUP);
+}
+#else
+static inline bool is_lam_active(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
+
 static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
 {
 	int cs_db, cs_l;
-- 
2.31.1
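
For context, a rough sketch of what the untagging amounts to for a user
pointer under LAM_U48 (illustrative only, not part of the patch; the
helper name is hypothetical): the metadata in the upper address bits is
dropped by sign-extending bit 47, after which the address is canonical
again and the existing __is_canonical_address() check passes.
kvm_untagged_addr() is expected to do the equivalent for whichever LAM
mode is active. Assumes kernel types from <linux/types.h>:

	/*
	 * Hypothetical sketch: LAM_U48 untagging replaces bits 63:48
	 * with copies of bit 47, the same computation as the kernel's
	 * __canonical_address(la, 48). For a user pointer (bit 63
	 * clear, bit 47 clear once untagged) this coincides with
	 * clearing just the metadata bits 62:48.
	 */
	static inline u64 lam_u48_untag_sketch(u64 la)
	{
		/* Arithmetic right shift sign-extends bit 47 upward. */
		return (u64)(((s64)la << 16) >> 16);
	}

For example, a tagged user pointer 0x1234000000400000 (metadata 0x1234
in the upper bits) untags to 0x0000000000400000.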