> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 9985dbb63e7b..16ddd3fcd3cb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2134,6 +2134,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> (!msr_info->host_initiated &&
> !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
> return 1;
> +
> + data = kvm_untagged_addr(data, vcpu);

Do we really need to take pains to call kvm_untagged_addr() unconditionally?
I mean, LAM may not be enabled by the guest, or may not even be exposed to
the guest at all.

> +
> if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
> (data & MSR_IA32_BNDCFGS_RSVD))
> return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eb1f2c20e19e..0a446b45e3d6 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1812,6 +1812,11 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
> case MSR_KERNEL_GS_BASE:
> case MSR_CSTAR:
> case MSR_LSTAR:
> + /*
> + * LAM applies only addresses used for data accesses.
> + * Tagged address should never reach here.
> + * Strict canonical check still applies here.
> + */
> if (is_noncanonical_address(data, vcpu))
> return 1;
> break;
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 6c1fbe27616f..f5a2a15783c6 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -195,11 +195,48 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
> return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
> }
>
> +static inline u64 get_canonical(u64 la, u8 vaddr_bits)
> +{
> + return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
> +}
> +

We already have a __canonical_address() in Linux, no need to re-invent
another one. :)

B.R.
Yu