> static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
> 			   unsigned int access)
> {
> +	struct kvm_memory_slot *slot = fault->slot;
> 	int ret;
>
> 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
> 	smp_rmb();
>
> +	/*
> +	 * Check for a relevant mmu_notifier invalidation event before getting
> +	 * the pfn from the primary MMU, and before acquiring mmu_lock.
> +	 *
> +	 * For mmu_lock, if there is an in-progress invalidation and the kernel
> +	 * allows preemption, the invalidation task may drop mmu_lock and yield
> +	 * in response to mmu_lock being contended, which is *very* counter-
> +	 * productive as this vCPU can't actually make forward progress until
> +	 * the invalidation completes.
> +	 *
> +	 * Retrying now can also avoid unnecessary lock contention in the primary
> +	 * MMU, as the primary MMU doesn't necessarily hold a single lock for
> +	 * the duration of the invalidation, i.e. faulting in a conflicting pfn
> +	 * can cause the invalidation to take longer by holding locks that are
> +	 * needed to complete the invalidation.
> +	 *
> +	 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
> +	 * will never yield mmu_lock in response to contention, as this vCPU is
> +	 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
> +	 * to detect retry guarantees the worst case latency for the vCPU.
> +	 */
> +	if (!slot &&

Typo?  Should be:

	if (slot &&

Thanks,
Yilun

> +	    mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
> +		return RET_PF_RETRY;
> +
> 	ret = __kvm_faultin_pfn(vcpu, fault);
> 	if (ret != RET_PF_CONTINUE)
> 		return ret;
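For anyone skimming the thread: the pre-check hinges on the lock-free helper
this series introduces.  A rough sketch of how that helper behaves is below.
The struct kvm fields (mmu_invalidate_in_progress, mmu_invalidate_range_start,
mmu_invalidate_range_end, mmu_invalidate_seq) are the ones declared in
include/linux/kvm_host.h, but the body here is my paraphrase of the idea, not
a verbatim quote of the patch:

	/*
	 * Sketch: decide, without holding mmu_lock, whether a fault at @gfn
	 * taken with snapshot @mmu_seq is doomed to retry.
	 */
	static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
							   unsigned long mmu_seq,
							   gfn_t gfn)
	{
		/*
		 * READ_ONCE() forces fresh loads of the in-progress flag and
		 * the sequence count so that a vCPU polling in a retry loop
		 * can observe the 1=>0 transition.  The range bounds may be
		 * stale, but a stale read only yields a false negative, which
		 * is benign: the fault path re-checks the sequence count under
		 * mmu_lock before installing the mapping.
		 */
		if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
		    gfn >= kvm->mmu_invalidate_range_start &&
		    gfn < kvm->mmu_invalidate_range_end)
			return true;

		return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
	}

Because the check is racy by design, a "false" result proves nothing on its
own; it only lets the vCPU skip the expensive pfn lookup and mmu_lock
acquisition in the common case where an invalidation is clearly in flight.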