On Wed, Nov 10, 2021 at 02:29:55PM -0800, Ben Gardon wrote:
> When preparing to free disconnected SPs, the list can accumulate many
> entries; enough that it is likely necessary to yield while queuing RCU
> callbacks to free the SPs.
>
> Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 18 +++++++++++++++---
>  1 file changed, 15 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index a448f0f2d993..c2a9f7acf8ef 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -513,7 +513,8 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
>   * being removed from the paging structure and this function being called.
>   */
>  static void handle_disconnected_sps(struct kvm *kvm,
> -				    struct list_head *disconnected_sps)
> +				    struct list_head *disconnected_sps,
> +				    bool can_yield, bool shared)
>  {
>  	struct kvm_mmu_page *sp;
>  	struct kvm_mmu_page *next;
> @@ -521,6 +522,16 @@ static void handle_disconnected_sps(struct kvm *kvm,
>  	list_for_each_entry_safe(sp, next, disconnected_sps, link) {
>  		list_del(&sp->link);
>  		call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
> +
> +		if (can_yield &&
> +		    (need_resched() || rwlock_needbreak(&kvm->mmu_lock))) {
> +			rcu_read_unlock();
> +			if (shared)
> +				cond_resched_rwlock_read(&kvm->mmu_lock);
> +			else
> +				cond_resched_rwlock_write(&kvm->mmu_lock);
> +			rcu_read_lock();
> +		}

What about something like this to cut down on the duplicate code?

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index c2a9f7acf8ef..2fd010f2421e 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -508,6 +508,26 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 			      new_spte, level);
 }
 
+static inline bool tdp_mmu_need_resched(struct kvm *kvm)
+{
+	return need_resched() || rwlock_needbreak(&kvm->mmu_lock);
+}
+
+static void tdp_mmu_cond_resched(struct kvm *kvm, bool shared, bool flush)
+{
+	rcu_read_unlock();
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	if (shared)
+		cond_resched_rwlock_read(&kvm->mmu_lock);
+	else
+		cond_resched_rwlock_write(&kvm->mmu_lock);
+
+	rcu_read_lock();
+}
+
 /*
  * The TLBs must be flushed between the pages linked from disconnected_sps
  * being removed from the paging structure and this function being called.
@@ -523,15 +543,8 @@ static void handle_disconnected_sps(struct kvm *kvm,
 		list_del(&sp->link);
 		call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
 
-		if (can_yield &&
-		    (need_resched() || rwlock_needbreak(&kvm->mmu_lock))) {
-			rcu_read_unlock();
-			if (shared)
-				cond_resched_rwlock_read(&kvm->mmu_lock);
-			else
-				cond_resched_rwlock_write(&kvm->mmu_lock);
-			rcu_read_lock();
-		}
+		if (can_yield && tdp_mmu_need_resched(kvm))
+			tdp_mmu_cond_resched(kvm, shared, false);
 	}
 }
 
@@ -724,18 +737,8 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 	if (iter->next_last_level_gfn == iter->yielded_gfn)
 		return false;
 
-	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-		rcu_read_unlock();
-
-		if (flush)
-			kvm_flush_remote_tlbs(kvm);
-
-		if (shared)
-			cond_resched_rwlock_read(&kvm->mmu_lock);
-		else
-			cond_resched_rwlock_write(&kvm->mmu_lock);
-
-		rcu_read_lock();
+	if (tdp_mmu_need_resched(kvm)) {
+		tdp_mmu_cond_resched(kvm, shared, flush);
 
 		WARN_ON(iter->gfn > iter->next_last_level_gfn);

>  	}
>  }
>
> @@ -599,7 +610,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
>  	 */
>  	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
>
> -	handle_disconnected_sps(kvm, &disconnected_sps);
> +	handle_disconnected_sps(kvm, &disconnected_sps, false, true);
>
>  	return true;
>  }
> @@ -817,7 +828,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>
>  	if (!list_empty(&disconnected_sps)) {
>  		kvm_flush_remote_tlbs(kvm);
> -		handle_disconnected_sps(kvm, &disconnected_sps);
> +		handle_disconnected_sps(kvm, &disconnected_sps,
> +					can_yield, shared);
>  		flush = false;
>  	}
>
> --
> 2.34.0.rc0.344.g81b53c2807-goog
>
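
For reference, with those helpers in place the loop in handle_disconnected_sps()
would read roughly like this (untested sketch of the end result, not an
additional change; flush stays false because zap_gfn_range() flushes TLBs
before calling in and tdp_mmu_zap_spte_atomic() passes can_yield=false):

static void handle_disconnected_sps(struct kvm *kvm,
				    struct list_head *disconnected_sps,
				    bool can_yield, bool shared)
{
	struct kvm_mmu_page *sp;
	struct kvm_mmu_page *next;

	list_for_each_entry_safe(sp, next, disconnected_sps, link) {
		list_del(&sp->link);
		call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);

		/*
		 * No flush needed when yielding here: zap_gfn_range() flushes
		 * before calling this, and the atomic zap path never yields,
		 * so only mmu_lock and the RCU read lock are dropped.
		 */
		if (can_yield && tdp_mmu_need_resched(kvm))
			tdp_mmu_cond_resched(kvm, shared, false);
	}
}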