From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> Use the bit SPTE_PRIVATE_PROHIBIT in shared and private EPT to determine which mapping, shared or private, is allowed. If the requested mapping isn't allowed, return RET_PF_RETRY to wait for another vcpu to change it. Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> --- arch/x86/kvm/mmu/spte.h | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 22 +++++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 25dffdb488d1..9c37381a6762 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -223,7 +223,7 @@ extern u64 __read_mostly shadow_init_value; static inline bool is_removed_spte(u64 spte) { - return spte == SHADOW_REMOVED_SPTE; + return (spte & ~SPTE_PRIVATE_PROHIBIT) == SHADOW_REMOVED_SPTE; } static inline bool is_private_prohibit_spte(u64 spte) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6d750563824d..f6bd35831e32 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1038,9 +1038,25 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, WARN_ON(sp->role.level != fault->goal_level); - /* TDX shared GPAs are no executable, enforce this for the SDV. */ - if (!kvm_is_private_gfn(vcpu->kvm, iter->gfn)) - pte_access &= ~ACC_EXEC_MASK; + if (kvm_gfn_stolen_mask(vcpu->kvm)) { + if (is_private_spte(iter->sptep)) { + /* + * This GPA is not allowed to be mapped as private. Let + * the vcpu loop in the page fault handler until another + * vcpu changes it via the MapGPA hypercall. + */ + if (fault->slot && + is_private_prohibit_spte(iter->old_spte)) + return RET_PF_RETRY; + } else { + /* This GPA is not allowed to be mapped as shared. */ + if (fault->slot && + !is_private_prohibit_spte(iter->old_spte)) + return RET_PF_RETRY; + /* TDX shared GPAs are not executable, enforce this. */ + pte_access &= ~ACC_EXEC_MASK; + } + } if (unlikely(!fault->slot)) new_spte = make_mmio_spte(vcpu, -- 2.25.1