From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

kvm_mmu_invalidate_end() updates struct kvm::mmu_invalidate_in_progress,
which is protected by kvm::mmu_lock.  Call kvm_mmu_invalidate_end()
before unlocking mmu_lock, not after the unlock.

Fixes: 8e9009ca6d14 ("KVM: Introduce per-page memory attributes")
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 virt/kvm/kvm_main.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8bfeb615fc4d..49380cd62367 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,6 +535,7 @@ struct kvm_mmu_notifier_range {
 	} arg;
 	gfn_handler_t handler;
 	on_lock_fn_t on_lock;
+	on_unlock_fn_t before_unlock;
 	on_unlock_fn_t on_unlock;
 	bool flush_on_ret;
 	bool may_block;
@@ -629,6 +630,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 		kvm_flush_remote_tlbs(kvm);
 
 	if (locked) {
+		if (!IS_KVM_NULL_FN(range->before_unlock))
+			range->before_unlock(kvm);
 		KVM_MMU_UNLOCK(kvm);
 		if (!IS_KVM_NULL_FN(range->on_unlock))
 			range->on_unlock(kvm);
@@ -653,6 +656,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.arg.pte	= pte,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
+		.before_unlock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= true,
 		.may_block	= false,
@@ -672,6 +676,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 		.end		= end,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
+		.before_unlock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= false,
@@ -776,6 +781,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.end		= range->end,
 		.handler	= kvm_mmu_unmap_gfn_range,
 		.on_lock	= kvm_mmu_invalidate_begin,
+		.before_unlock	= (void *)kvm_null_fn,
 		.on_unlock	= kvm_arch_guest_memory_reclaimed,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
@@ -815,6 +821,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 
 void kvm_mmu_invalidate_end(struct kvm *kvm)
 {
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
 	/*
 	 * This sequence increase will notify the kvm page fault that
 	 * the page that is going to be mapped in the spte could have
@@ -846,6 +854,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 		.end		= range->end,
 		.handler	= (void *)kvm_null_fn,
 		.on_lock	= kvm_mmu_invalidate_end,
+		.before_unlock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= mmu_notifier_range_blockable(range),
@@ -2433,6 +2442,8 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
 		kvm_flush_remote_tlbs(kvm);
 
 	if (locked) {
+		if (!IS_KVM_NULL_FN(range->before_unlock))
+			range->before_unlock(kvm);
 		KVM_MMU_UNLOCK(kvm);
 		if (!IS_KVM_NULL_FN(range->on_unlock))
 			range->on_unlock(kvm);
@@ -2447,6 +2458,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, unsigned long attributes,
 		.end = end,
 		.handler = kvm_mmu_unmap_gfn_range,
 		.on_lock = kvm_mmu_invalidate_begin,
+		.before_unlock = (void *)kvm_null_fn,
 		.on_unlock = (void *)kvm_null_fn,
 		.flush_on_ret = true,
 		.may_block = true,
@@ -2457,7 +2469,8 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, unsigned long attributes,
 		.arg.attributes = attributes,
 		.handler = kvm_arch_post_set_memory_attributes,
 		.on_lock = (void *)kvm_null_fn,
-		.on_unlock = kvm_mmu_invalidate_end,
+		.before_unlock = kvm_mmu_invalidate_end,
+		.on_unlock = (void *)kvm_null_fn,
 		.may_block = true,
 	};
 	unsigned long i;
-- 
2.25.1
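As background for the ordering the patch enforces: kvm::mmu_invalidate_in_progress
may only be touched while kvm::mmu_lock is held, so the "end" callback must run
before the lock is dropped.  Below is a minimal standalone userspace analogue
(a sketch, not kernel code; the lock, field, and function names here are
simplified stand-ins, not the kernel's) showing the fixed ordering:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int invalidate_in_progress;	/* stand-in for kvm->mmu_invalidate_in_progress */

static void invalidate_end(void)
{
	/* Like kvm_mmu_invalidate_end(): caller must hold mmu_lock. */
	invalidate_in_progress--;
}

static void handle_range(void)
{
	pthread_mutex_lock(&mmu_lock);
	invalidate_in_progress++;	/* "invalidate_begin", under the lock */
	/* ... invalidation work ... */
	invalidate_end();		/* fixed ordering: still under the lock */
	pthread_mutex_unlock(&mmu_lock);
	/*
	 * The buggy ordering ran invalidate_end() here, after the unlock,
	 * modifying the protected field with no lock held.
	 */
}

int main(void)
{
	handle_range();
	printf("in_progress = %d\n", invalidate_in_progress);
	return 0;
}

(Build with: gcc -pthread example.c)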