On Tue, Nov 05, 2024, James Houghton wrote:
> This new function, tdp_mmu_clear_spte_bits_atomic(), will be used in a
> follow-up patch to enable lockless Accessed and R/W/X bit clearing.

This is a lie.  tdp_mmu_clear_spte_bits_atomic() can only be used to clear
the Accessed bit; clearing RWX bits for access-tracked SPTEs *must* be done
with a CMPXCHG so that the original RWX protections are preserved.

> Signed-off-by: James Houghton <jthoughton@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/tdp_iter.h | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
> index 2880fd392e0c..a24fca3f9e7f 100644
> --- a/arch/x86/kvm/mmu/tdp_iter.h
> +++ b/arch/x86/kvm/mmu/tdp_iter.h
> @@ -25,6 +25,13 @@ static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
>  	return xchg(rcu_dereference(sptep), new_spte);
>  }
>
> +static inline u64 tdp_mmu_clear_spte_bits_atomic(tdp_ptep_t sptep, u64 mask)
> +{
> +	atomic64_t *sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
> +
> +	return (u64)atomic64_fetch_and(~mask, sptep_atomic);
> +}
> +
>  static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
>  {
>  	KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
> @@ -63,12 +70,8 @@ static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
>  static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
>  					  u64 mask, int level)
>  {
> -	atomic64_t *sptep_atomic;
> -
> -	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
> -		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
> -		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
> -	}
> +	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
> +		return tdp_mmu_clear_spte_bits_atomic(sptep, mask);
>
>  	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
>  	return old_spte;
> --
> 2.47.0.199.ga7371fff76-goog
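
For the Accessed bit alone, the atomic AND is fine because no information is
lost.  To illustrate why the access-tracking case is different, something
like the below is what it would need (completely untested, and
tdp_mmu_access_track_spte_atomic() is a made-up name, not an existing
helper): the old RWX protections have to be stashed in the SPTE via
mark_spte_for_access_track() before they're cleared, which is inherently a
read-modify-write, i.e. a CMPXCHG loop, not a single AND mask.

	static inline u64 tdp_mmu_access_track_spte_atomic(tdp_ptep_t sptep)
	{
		u64 old_spte = kvm_tdp_mmu_read_spte(sptep);
		u64 new_spte;

		do {
			/*
			 * mark_spte_for_access_track() saves the RWX bits
			 * in the SPTE before clearing them, so the update
			 * can't be expressed as an AND mask.
			 */
			new_spte = mark_spte_for_access_track(old_spte);
		} while (!try_cmpxchg64(rcu_dereference(sptep), &old_spte,
					new_spte));

		return old_spte;
	}

Same try_cmpxchg64() pattern as __tdp_mmu_set_spte_atomic(), i.e. on failure
old_spte is refreshed with the current value and the new SPTE is recomputed.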