On Sat, Jul 23, 2022 at 01:23:23AM +0000, Sean Christopherson wrote:
<snip>
> @@ -386,16 +385,18 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
>  static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
>  			      bool shared)
>  {
> +	atomic64_dec(&kvm->arch.tdp_mmu_pages);
> +
> +	if (!sp->nx_huge_page_disallowed)
> +		return;
> +

Does this read of sp->nx_huge_page_disallowed also need to be protected by
tdp_mmu_pages_lock in the shared path?

Thanks
Yan

>  	if (shared)
>  		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
>  	else
>  		lockdep_assert_held_write(&kvm->mmu_lock);
> 
> -	list_del(&sp->link);
> -	if (sp->nx_huge_page_disallowed) {
> -		sp->nx_huge_page_disallowed = false;
> -		untrack_possible_nx_huge_page(kvm, sp);
> -	}
> +	sp->nx_huge_page_disallowed = false;
> +	untrack_possible_nx_huge_page(kvm, sp);
> 
>  	if (shared)
>  		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
> @@ -1132,9 +1133,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
>  		tdp_mmu_set_spte(kvm, iter, spte);
>  	}
> 
> -	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
> -	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
> -	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
> +	atomic64_inc(&kvm->arch.tdp_mmu_pages);
> 
>  	return 0;
>  }
> -- 
> 2.37.1.359.gd136c6c3e2-goog
> 
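
To make the question above concrete, this is the shape I have in mind: the
nx_huge_page_disallowed check moved under the lock instead of being done
before it. This is only an illustration of the ordering being asked about,
not a proposed patch, and it assumes the rest of the patch above is applied
as-is:

	static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
				      bool shared)
	{
		atomic64_dec(&kvm->arch.tdp_mmu_pages);

		/*
		 * Take tdp_mmu_pages_lock (shared walk) or assert mmu_lock held
		 * for write (exclusive walk) *before* reading
		 * nx_huge_page_disallowed, so the read is ordered against a
		 * concurrent writer.
		 */
		if (shared)
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
		else
			lockdep_assert_held_write(&kvm->mmu_lock);

		if (sp->nx_huge_page_disallowed) {
			sp->nx_huge_page_disallowed = false;
			untrack_possible_nx_huge_page(kvm, sp);
		}

		if (shared)
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	}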