On Tue, 2024-05-14 at 17:59 -0700, Rick Edgecombe wrote:
> +static inline int __tdp_mmu_set_spte_atomic(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte)
> {
> 	u64 *sptep = rcu_dereference(iter->sptep);
> 
> @@ -542,15 +671,42 @@ static inline int __tdp_mmu_set_spte_atomic(struct tdp_iter *iter, u64 new_spte)
> 	 */
> 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
> 
> -	/*
> -	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
> -	 * does not hold the mmu_lock.  On failure, i.e. if a different logical
> -	 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
> -	 * the current value, so the caller operates on fresh data, e.g. if it
> -	 * retries tdp_mmu_set_spte_atomic()
> -	 */
> -	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
> -		return -EBUSY;
> +	if (is_private_sptep(iter->sptep) && !is_removed_spte(new_spte)) {
> +		int ret;
> +
> +		if (is_shadow_present_pte(new_spte)) {
> +			/*
> +			 * Populating case.
> +			 * - set_private_spte_present() implements
> +			 *   1) Freeze SPTE
> +			 *   2) call hooks to update private page table,
> +			 *   3) update SPTE to new_spte
> +			 * - handle_changed_spte() only updates stats.
> +			 */
> +			ret = set_private_spte_present(kvm, iter->sptep, iter->gfn,
> +						       iter->old_spte, new_spte, iter->level);
> +			if (ret)
> +				return ret;
> +		} else {
> +			/*
> +			 * Zapping case.
> +			 * Zap is only allowed when write lock is held
> +			 */
> +			if (WARN_ON_ONCE(!is_shadow_present_pte(new_spte)))

This is inside the else block of the is_shadow_present_pte(new_spte) check, so
the condition will always be true if execution gets here. But it can't get
here, because TDX doesn't do any atomic zapping. We can remove the conditional
(sketch at the end of this mail), but regarding the WARN, any recollection of
what might have been going on here originally?

> +				return -EBUSY;
> +	}
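
For reference, the cleanup I have in mind is roughly the following. Untested
sketch only, and it assumes the atomic path really never zaps a private SPTE,
i.e. a non-present private new_spte can only arrive via the write-lock path:

		} else {
			/*
			 * Zapping a private SPTE is only done while holding
			 * the write lock, so the atomic path should never be
			 * asked to install a non-present private SPTE.  Warn
			 * and reject if that somehow happens.
			 */
			WARN_ON_ONCE(1);
			return -EBUSY;
		}

That keeps the "should be unreachable" signal without the tautological check,
but it doesn't answer what the original WARN was trying to catch.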