This, together with the previous patch, ensures that kvm_zap_gfn_range()
does not race with a page fault running on another vCPU, and instead
makes that page fault code retry.

This is based on a patch suggested by Sean Christopherson:
https://lkml.org/lkml/2021/7/22/1025

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c   | 4 ++++
 include/linux/kvm_host.h | 5 +++++
 virt/kvm/kvm_main.c      | 7 +++++--
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 9d78cb1c0f35..9da635e383c2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5640,6 +5640,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
 	write_lock(&kvm->mmu_lock);
 
+	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 			slots = __kvm_memslots(kvm, i);
@@ -5671,6 +5673,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	if (flush)
 		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
 
+	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+
 	write_unlock(&kvm->mmu_lock);
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d6b4ad407b8..962e11a73e8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -985,6 +985,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
 
+void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
+			    unsigned long end);
+void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
+			    unsigned long end);
+
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a96cbe24c688..71042cd807b3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -608,7 +608,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
 }
 
-static void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
+void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
 				   unsigned long end)
 {
 	/*
@@ -636,6 +636,7 @@ static void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
 			max(kvm->mmu_notifier_range_end, end);
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_inc_notifier_count);
 
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
@@ -670,7 +671,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	return 0;
 }
 
-static void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
+void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
 				   unsigned long end)
 {
 	/*
@@ -687,6 +688,8 @@ static void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
 	 */
 	kvm->mmu_notifier_count--;
 }
+EXPORT_SYMBOL_GPL(kvm_dec_notifier_count);
+
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
-- 
2.26.3
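
For reference (not part of the patch): the consumer of this count is the
page fault path, which samples mmu_notifier_seq before resolving the
fault and then, under mmu_lock, rechecks the sequence together with
mmu_notifier_count before installing a SPTE. A minimal sketch of that
check, modeled on the existing mmu_notifier_retry_hva() logic in
include/linux/kvm_host.h (the helper name fault_must_retry is
hypothetical):

	/*
	 * Sketch of the retry check done by the page fault path under
	 * mmu_lock: if an invalidation (or, with this patch, a concurrent
	 * kvm_zap_gfn_range() call) has raised mmu_notifier_count for an
	 * overlapping range, or the sequence count changed since it was
	 * sampled, the fault must be retried instead of installing a SPTE.
	 */
	static inline bool fault_must_retry(struct kvm *kvm,
					    unsigned long mmu_seq,
					    unsigned long hva)
	{
		if (unlikely(kvm->mmu_notifier_count) &&
		    hva >= kvm->mmu_notifier_range_start &&
		    hva < kvm->mmu_notifier_range_end)
			return true;	/* races with an in-progress zap */

		/* a completed invalidation bumped the sequence count */
		return kvm->mmu_notifier_seq != mmu_seq;
	}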