Move code in vmx.c to get cache disabled memtype when non-coherent DMA
present to x86 common code.

This is the preparation patch for later implementation of fine-grained
gfn zap for CR0.CD toggles when guest MTRRs are honored.

No functional change intended.

Signed-off-by: Yan Zhao <yan.y.zhao@xxxxxxxxx>
---
 arch/x86/kvm/mtrr.c    | 19 +++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c | 10 +++++-----
 arch/x86/kvm/x86.h     |  1 +
 3 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 3ce58734ad22..b35dd0bc9cad 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -721,3 +721,22 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 	return type == mtrr_default_type(mtrr_state);
 }
+
+void kvm_mtrr_get_cd_memory_type(struct kvm_vcpu *vcpu, u8 *type, bool *ipat)
+{
+	/*
+	 * this routine is supposed to be called when guest mtrrs are honored
+	 */
+	if (unlikely(!kvm_mmu_honors_guest_mtrrs(vcpu->kvm))) {
+		*type = MTRR_TYPE_WRBACK;
+		*ipat = true;
+	} else if (unlikely(!kvm_check_has_quirk(vcpu->kvm,
+						 KVM_X86_QUIRK_CD_NW_CLEARED))) {
+		*type = MTRR_TYPE_UNCACHABLE;
+		*ipat = true;
+	} else {
+		*type = MTRR_TYPE_WRBACK;
+		*ipat = false;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mtrr_get_cd_memory_type);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c1e93678cea4..6414c5a6e892 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7573,11 +7573,11 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
 
 	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
-		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
-		else
-			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
-				VMX_EPT_IPAT_BIT;
+		bool ipat;
+		u8 cache;
+
+		kvm_mtrr_get_cd_memory_type(vcpu, &cache, &ipat);
+		return cache << VMX_EPT_MT_EPTE_SHIFT | (ipat ? VMX_EPT_IPAT_BIT : 0);
 	}
 
 	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 82e3dafc5453..9781b4b32d68 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -313,6 +313,7 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
+void kvm_mtrr_get_cd_memory_type(struct kvm_vcpu *vcpu, u8 *type, bool *ipat);
 bool kvm_vector_hashing_enabled(void);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
-- 
2.17.1