On Wed, Dec 14, 2022 at 11:43:14AM +0000, "Huang, Kai" <kai.huang@xxxxxxxxx> wrote:
> On Sat, 2022-10-29 at 23:23 -0700, isaku.yamahata@xxxxxxxxx wrote:
> > +static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> > +{
> > +	if (is_td_vcpu(vcpu)) {
> > +		if (is_mmio)
> > +			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
> > +		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
> > +	}
> > +
> > +	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
> > +}
>
> So you are returning WB for _ALL_ guest memory, including shared. Wouldn't this
> break MTRR handling for shared memory? For instance, IIUC we can still support
> assigning a device to a TDX guest while the VT-d doesn't support coherent
> memory, in which case guest's MTRR/PAT are honored. I think this should also
> apply to TDX guest's shared memory?

You're right. So here is the updated change.

--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -798,11 +798,8 @@ static int vt_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
 
 static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
-	if (is_td_vcpu(vcpu)) {
-		if (is_mmio)
-			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
-	}
+	if (is_td_vcpu(vcpu))
+		return tdx_get_mt_mask(vcpu, gfn, is_mmio);
 
 	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
 }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index ac47b20e4e91..f1842eb32d6c 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -568,7 +568,31 @@ int tdx_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	if (is_mmio)
+		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+
+	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+
+	/* Guest CR0 is unknown. Assume CR0.CD = 0. */
+
+	/* TDX private GPA is always WB. */
+	if (!(gfn & kvm_gfn_shared_mask(vcpu->kvm)))
+		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
+
+	/*
+	 * Because the definition of MTRR MSR is unaware of shared-bit,
+	 * clear shared-bit.
+	 */
+	gfn = kvm_gfn_private(vcpu->kvm, gfn);
+	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
+}
+
 int tdx_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 96f5546194d4..38fb888e1be9 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -168,6 +168,7 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu,
 void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 			   int trig_mode, int vector);
 void tdx_inject_nmi(struct kvm_vcpu *vcpu);
+u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
 		       u64 *info2, u32 *intr_info, u32 *error_code);
 bool tdx_is_emulated_msr(u32 index, bool write);
-- 
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>