On Wed, Jun 22, 2022, Peter Xu wrote:
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index e92f1ab63d6a..b39acb7cb16d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3012,6 +3012,13 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
>  static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
>  			       unsigned int access)
>  {
> +	/* NOTE: not all error pfn is fatal; handle intr before the other ones */
> +	if (unlikely(is_intr_pfn(fault->pfn))) {
> +		vcpu->run->exit_reason = KVM_EXIT_INTR;
> +		++vcpu->stat.signal_exits;
> +		return -EINTR;
> +	}
> +
>  	/* The pfn is invalid, report the error! */
>  	if (unlikely(is_error_pfn(fault->pfn)))
>  		return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
> @@ -4017,6 +4024,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  		}
>  	}
>  
> +	/* Allow to respond to generic signals in slow page faults */

"slow" is being overloaded here.  The previous call to __gfn_to_pfn_memslot()
will also end up in hva_to_pfn_slow(), but because it passes a non-NULL async
it won't wait.  This code really should have a more extensive comment
irrespective of the interruptible stuff; now would be a good time to add that
(a rough sketch of the sort of comment I have in mind is appended at the end
of this mail).

Comments aside, isn't this series incomplete in the sense that there are still
many flows where KVM will hang if gfn_to_pfn() gets stuck in gup?  E.g. if KVM
is retrieving a page pointed at by vmcs12.

> +	flags |= KVM_GTP_INTERRUPTIBLE;
>  	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, flags, NULL,
>  					  &fault->map_writable, &fault->hva);
>  	return RET_PF_CONTINUE;
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 4f84a442f67f..c8d98e435537 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -1163,6 +1163,7 @@ typedef unsigned int __bitwise kvm_gtp_flag_t;
>  
>  #define KVM_GTP_WRITE          ((__force kvm_gtp_flag_t) BIT(0))
>  #define KVM_GTP_ATOMIC         ((__force kvm_gtp_flag_t) BIT(1))
> +#define KVM_GTP_INTERRUPTIBLE  ((__force kvm_gtp_flag_t) BIT(2))
>  
>  kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
>  			       kvm_gtp_flag_t gtp_flags, bool *async,
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 952400b42ee9..b3873cac5672 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2462,6 +2462,8 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async,
>  		flags |= FOLL_WRITE;
>  	if (async)
>  		flags |= FOLL_NOWAIT;
> +	if (gtp_flags & KVM_GTP_INTERRUPTIBLE)
> +		flags |= FOLL_INTERRUPTIBLE;
>  
>  	npages = get_user_pages_unlocked(addr, 1, &page, flags);
>  	if (npages != 1)
> @@ -2599,6 +2601,8 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, kvm_gtp_flag_t gtp_flags, bool *async,
>  	npages = hva_to_pfn_slow(addr, async, gtp_flags, writable, &pfn);
>  	if (npages == 1)
>  		return pfn;
> +	if (npages == -EINTR)
> +		return KVM_PFN_ERR_INTR;
>  
>  	mmap_read_lock(current->mm);
>  	if (npages == -EHWPOISON ||
> -- 
> 2.32.0
> 
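Appended, since it's easier to discuss something concrete: a rough sketch of
the sort of comment I'm thinking of, sitting above the synchronous
__gfn_to_pfn_memslot() call in kvm_faultin_pfn().  The wording (and whether
parts of it belong on the earlier FOLL_NOWAIT attempt instead) is entirely up
for grabs; everything referenced below comes from the patch above, nothing new
is being introduced.

	/*
	 * Retry the fault "synchronously", i.e. without FOLL_NOWAIT.  This
	 * still goes through hva_to_pfn_slow(), but with a NULL @async the
	 * gup will actually wait for the page to be faulted in.  Tag the
	 * attempt as interruptible so that a pending signal breaks the wait
	 * and is surfaced to userspace as KVM_EXIT_INTR (see
	 * handle_abnormal_pfn()) instead of leaving the vCPU blocked
	 * indefinitely.
	 */
	flags |= KVM_GTP_INTERRUPTIBLE;
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, flags, NULL,
					  &fault->map_writable, &fault->hva);
	return RET_PF_CONTINUE;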