On Fri, Dec 09, 2022 at 12:45:54PM +0800, Robert Hoo wrote:
> Define kvm_untagged_addr() per the LAM feature spec: address high bits
> are sign-extended from the highest effective address bit.
> Note that LAM_U48 and LA57 have some overlapping effective bits. This
> patch gives a WARN() on that case.
> 
> For now, the only case in which addresses with LAM bits are passed
> down from the VM is the MPX MSRs.

How about the instruction emulation case? E.g., when KVM accesses a
linear address on behalf of the CPU? In that case kvm_untagged_addr()
should also be used to mask out the metadata bits of the linear
address; otherwise an unexpected #GP (or other exception) will be
injected into the guest. Please see all callers of
__is_canonical_address().
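Something along these lines is what I mean (an illustrative sketch
only, not existing code; the emulator op name and the exact hook point
are made up, since emulate.c is deliberately insulated from struct
kvm_vcpu and the untagging would have to be plumbed through a new
x86_emulate_ops callback):

	/* x86.c: back the new emulator op with kvm_untagged_addr() */
	static u64 emulator_untagged_addr(struct x86_emulate_ctxt *ctxt,
					  u64 la)
	{
		return kvm_untagged_addr(la, emul_to_vcpu(ctxt));
	}

	/* emulate.c, __linearize(): untag before the canonical check */
	la = ctxt->ops->untagged_addr(ctxt, la);
	if (emul_is_noncanonical_address(la, ctxt))
		goto bad;

Otherwise any emulated access through a tagged pointer fails the
canonical check and the guest takes a #GP it would not see on bare
metal.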
> 
> Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
> Reviewed-by: Jingqi Liu <jingqi.liu@xxxxxxxxx>
> ---
>  arch/x86/kvm/vmx/vmx.c |  3 +++
>  arch/x86/kvm/x86.c     |  5 +++++
>  arch/x86/kvm/x86.h     | 37 +++++++++++++++++++++++++++++++++++++
>  3 files changed, 45 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 9985dbb63e7b..16ddd3fcd3cb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2134,6 +2134,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		    (!msr_info->host_initiated &&
>  		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
>  			return 1;
> +
> +		data = kvm_untagged_addr(data, vcpu);
> +
>  		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
>  		    (data & MSR_IA32_BNDCFGS_RSVD))
>  			return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eb1f2c20e19e..0a446b45e3d6 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1812,6 +1812,11 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
>  	case MSR_KERNEL_GS_BASE:
>  	case MSR_CSTAR:
>  	case MSR_LSTAR:
> +		/*
> +		 * LAM applies only to addresses used for data accesses.
> +		 * A tagged address should never reach here.
> +		 * The strict canonical check still applies.
> +		 */
>  		if (is_noncanonical_address(data, vcpu))
>  			return 1;
>  		break;
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 6c1fbe27616f..f5a2a15783c6 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -195,11 +195,48 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
>  	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
>  }
>  
> +static inline u64 get_canonical(u64 la, u8 vaddr_bits)
> +{
> +	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
> +}
> +
>  static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
>  {
>  	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
>  }
>  
> +#ifdef CONFIG_X86_64
> +/* Untag an address for the guest, according to vCPU CR3 and CR4 settings */
> +static inline u64 kvm_untagged_addr(u64 addr, struct kvm_vcpu *vcpu)
> +{
> +	if (addr >> 63 == 0) {
> +		/* User pointers */
> +		if (kvm_read_cr3(vcpu) & X86_CR3_LAM_U57)
> +			addr = get_canonical(addr, 57);
> +		else if (kvm_read_cr3(vcpu) & X86_CR3_LAM_U48) {
> +			/*
> +			 * If the guest enabled 5-level paging and LAM_U48,
> +			 * bit 47 must be 0 and bits 56:48 contain metadata,
> +			 * even though bits 56:47 are valid 5-level address
> +			 * bits.
> +			 * With LAM_U48 and 4-level paging, bit 47 is 0.
> +			 */
> +			WARN_ON(addr & _BITUL(47));
> +			addr = get_canonical(addr, 48);
> +		}
> +	} else if (kvm_read_cr4(vcpu) & X86_CR4_LAM_SUP) { /* Supervisor pointers */
> +		if (kvm_read_cr4(vcpu) & X86_CR4_LA57)
> +			addr = get_canonical(addr, 57);
> +		else
> +			addr = get_canonical(addr, 48);
> +	}
> +
> +	return addr;
> +}
> +#else
> +#define kvm_untagged_addr(addr, vcpu)	(addr)
> +#endif
> +
>  static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
>  					gva_t gva, gfn_t gfn, unsigned access)
>  {
> -- 
> 2.31.1
> 
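For reference, the sign extension in get_canonical() strips the
metadata bits like this (a quick standalone userspace sketch, not KVM
code; the pointer value and the 6-bit tag 0x2a are made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t get_canonical(uint64_t la, uint8_t vaddr_bits)
	{
		return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
	}

	int main(void)
	{
		/* user pointer with a LAM_U57 tag stored in bits 62:57 */
		uint64_t tagged = 0x00007f1234567890ULL | (0x2aULL << 57);

		printf("tagged:   %#018llx\n", (unsigned long long)tagged);
		/* bits 63:57 become copies of bit 56 (0 here), so this
		 * prints 0x00007f1234567890 again */
		printf("untagged: %#018llx\n",
		       (unsigned long long)get_canonical(tagged, 57));
		return 0;
	}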