Add an extra param "kvm" to make_spte() to allow param "vcpu" to be NULL in future to allow generating spte in non-vcpu context. "vcpu" is only used in make_spte() to get memory type mask if shadow_memtype_mask is true, which applies only to VMX when EPT is enabled. VMX only requires param "vcpu" when non-coherent DMA devices are attached to check vcpu's CR0.CD and guest MTRRs. So, if non-coherent DMAs are not attached, make_spte() can call kvm_x86_get_default_mt_mask() to get default memory type for non-vCPU context. This is a preparation patch for later KVM MMU to export TDP. Signed-off-by: Yan Zhao <yan.y.zhao@xxxxxxxxx> --- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/mmu/spte.c | 18 ++++++++++++------ arch/x86/kvm/mmu/spte.h | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 5 files changed, 16 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e4cae4ff20770..c9b587b30dae3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2939,7 +2939,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, was_rmapped = 1; } - wrprot = make_spte(vcpu, &vcpu->arch.mmu->common, + wrprot = make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, true, host_writable, &spte); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 054d1a203f0ca..fb4767a9e966e 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -960,7 +960,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int spte = *sptep; host_writable = spte & shadow_host_writable_mask; slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - make_spte(vcpu, &vcpu->arch.mmu->common, sp, slot, pte_access, + make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common, sp, slot, pte_access, gfn, spte_to_pfn(spte), spte, true, false, host_writable, &spte); return mmu_spte_update(sptep, spte); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index daeab3b9eee1e..5e73a679464c0 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -138,7 +138,7 @@ bool spte_has_volatile_bits(u64 spte) return false; } -bool make_spte(struct kvm_vcpu *vcpu, +bool make_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, struct kvm_mmu_common *mmu_common, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, @@ -179,7 +179,7 @@ bool make_spte(struct kvm_vcpu *vcpu, * just to optimize a mode that is anything but performance critical. */ if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && - is_nx_huge_page_enabled(vcpu->kvm)) { + is_nx_huge_page_enabled(kvm)) { pte_access &= ~ACC_EXEC_MASK; } @@ -194,9 +194,15 @@ bool make_spte(struct kvm_vcpu *vcpu, if (level > PG_LEVEL_4K) spte |= PT_PAGE_SIZE_MASK; - if (shadow_memtype_mask) - spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, + if (shadow_memtype_mask) { + if (vcpu) + spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn)); + else + spte |= static_call(kvm_x86_get_default_mt_mask)(kvm, + kvm_is_mmio_pfn(pfn)); + } + if (host_writable) spte |= shadow_host_writable_mask; else @@ -225,7 +231,7 @@ bool make_spte(struct kvm_vcpu *vcpu, * e.g. it's write-tracked (upper-level SPs) or has one or more * shadow pages and unsync'ing pages is not allowed. 
*/ - if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) { + if (mmu_try_to_unsync_pages(kvm, slot, gfn, can_unsync, prefetch)) { wrprot = true; pte_access &= ~ACC_WRITE_MASK; spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); @@ -246,7 +252,7 @@ bool make_spte(struct kvm_vcpu *vcpu, if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) { /* Enforced by kvm_mmu_hugepage_adjust. */ WARN_ON_ONCE(level > PG_LEVEL_4K); - mark_page_dirty_in_slot(vcpu->kvm, slot, gfn); + mark_page_dirty_in_slot(kvm, slot, gfn); } *new_spte = spte; diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 4ad19c469bd73..f1532589b7083 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -530,7 +530,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) bool spte_has_volatile_bits(u64 spte); -bool make_spte(struct kvm_vcpu *vcpu, +bool make_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, struct kvm_mmu_common *mmu_common, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 892cf1f5b57a8..a45d1b71cd62a 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -964,7 +964,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu->kvm, vcpu, iter->gfn, ACC_ALL); else - wrprot = make_spte(vcpu, &vcpu->arch.mmu->common, sp, fault->slot, + wrprot = make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common, sp, fault->slot, ACC_ALL, iter->gfn, fault->pfn, iter->old_spte, fault->prefetch, true, fault->map_writable, &new_spte); -- 2.17.1
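Note on kvm_x86_get_default_mt_mask(): the hook itself is introduced by
a separate patch in this series, so its definition does not appear in
the diff above. For illustration only, assuming it simply mirrors the
existing no-non-coherent-DMA fast path of vmx_get_mt_mask(), the VMX
implementation could look roughly like the following sketch (the
function name and body are assumptions, not a hunk from the series):

/*
 * Sketch only: a kvm-scoped default EPT memory type, usable without a
 * vCPU.  This is valid only while no non-coherent DMA device is
 * attached, since only then does the memory type not depend on
 * per-vCPU state (CR0.CD, guest MTRRs): UC for MMIO pfns, otherwise
 * WB with the "ignore PAT" (IPAT) bit set.
 */
static u64 vmx_get_default_mt_mask(struct kvm *kvm, bool is_mmio)
{
	WARN_ON(kvm_arch_has_noncoherent_dma(kvm));

	if (is_mmio)
		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

	return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
}

Taking "struct kvm *" rather than "struct kvm_vcpu *" is what lets
make_spte() fall back to this path when vcpu is NULL.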