On 30/09/20 08:15, Sean Christopherson wrote:
>>  	kvm_zap_obsolete_pages(kvm);
>> +
>> +	if (kvm->arch.tdp_mmu_enabled)
>> +		kvm_tdp_mmu_zap_all(kvm);
>
> Haven't looked into how this works; is kvm_tdp_mmu_zap_all() additive to
> what is done by the legacy zapping, or is it a replacement?

It's additive, because the shadow MMU is still used for nesting.

>> +
>>  	spin_unlock(&kvm->mmu_lock);
>>  }
>> @@ -57,8 +58,13 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
>>  	return root->tdp_mmu_page;
>>  }
>>
>> +static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>> +			  gfn_t start, gfn_t end);
>> +
>>  static void free_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
>>  {
>> +	gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
>
> BIT_ULL(...)

Not sure about that. Here the point is not to have a single bit, but to
compute a power of two (a standalone sketch of this follows at the end
of this message). Same for the version below.

>> + * If the MMU lock is contended or this thread needs to yield, flushes
>> + * the TLBs, releases the MMU lock, yields, reacquires the MMU lock,
>> + * restarts the tdp_iter's walk from the root, and returns true.
>> + * If no yield is needed, returns false.
>> + */
>> +static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
>> +{
>> +	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
>> +		kvm_flush_remote_tlbs(kvm);
>> +		cond_resched_lock(&kvm->mmu_lock);
>> +		tdp_iter_refresh_walk(iter);
>> +		return true;
>> +	} else {
>> +		return false;
>> +	}
>
> Kernel style is to not bother with an "else" if the "if" returns.

I have rewritten all of this in my version anyway. :) (A sketch of the
no-else shape is also at the end of this message.)

Paolo
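
To make the power-of-two point concrete, here is a minimal standalone
sketch. It is not kernel code: PHYS_BITS stands in for
boot_cpu_data.x86_phys_bits, 52 is an assumed MAXPHYADDR, and
PAGE_SHIFT is hardcoded to the usual 12.

#include <stdio.h>
#include <stdint.h>

#define PHYS_BITS  52	/* assumed MAXPHYADDR, for illustration only */
#define PAGE_SHIFT 12

int main(void)
{
	/*
	 * max_gfn is one past the last representable gfn, i.e. a
	 * power of two used as an exclusive range bound, not a
	 * single-bit flag.  That is why 1ULL << n reads better than
	 * BIT_ULL(n) here, even though both expand to the same value.
	 */
	uint64_t max_gfn = 1ULL << (PHYS_BITS - PAGE_SHIFT);

	printf("max_gfn = 2^%d = %llu\n", PHYS_BITS - PAGE_SHIFT,
	       (unsigned long long)max_gfn);
	return 0;
}

Presumably free_tdp_mmu_root() then uses this as the exclusive end of a
zap_gfn_range(kvm, root, 0, max_gfn) call, which would explain the
forward declaration in the quoted hunk.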
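As for the style nit on tdp_mmu_iter_cond_resched(), here is a
compilable sketch of the same control flow without the "else". The
stub helpers and their trivial bodies are mine, standing in for
need_resched(), spin_needbreak(), kvm_flush_remote_tlbs(),
cond_resched_lock() and tdp_iter_refresh_walk().

#include <stdbool.h>

/* Placeholder stubs; in the kernel these would be the real helpers. */
static bool need_resched_stub(void)       { return false; }
static bool lock_contended_stub(void)     { return false; }
static void flush_yield_and_restart(void) { }

/*
 * Same logic as the quoted helper, restructured in kernel style:
 * the "if" branch returns, so the "else" is dropped and the final
 * return stands on its own.
 */
static bool cond_resched_sketch(void)
{
	if (need_resched_stub() || lock_contended_stub()) {
		flush_yield_and_restart();
		return true;
	}

	return false;
}

int main(void)
{
	return cond_resched_sketch() ? 1 : 0;
}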