Change the "atomic" parameter of __gfn_to_pfn_memslot() to an enum which reflects the possibility of allowing non-atomic accesses (GUP calls) being "upgraded" to atomic, and mark locations where such upgrading is allowed. Concerning gfn_to_pfn_prot(): this function is unused on x86, and the only usage on arm64 is from a codepath where upgrading gup calls to atomic based on the memslot is undesirable. Therefore, punt on adding any plumbing to expose the 'atomicity' parameter. Signed-off-by: Anish Moorthy <amoorthy@xxxxxxxxxx> --- arch/arm64/kvm/mmu.c | 4 +-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 3 +- arch/powerpc/kvm/book3s_64_mmu_radix.c | 3 +- arch/x86/kvm/mmu/mmu.c | 8 +++--- include/linux/kvm_host.h | 14 +++++++++- virt/kvm/kvm_main.c | 38 ++++++++++++++++++++------ 6 files changed, 53 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8ede6c5edc5f..ac77ae5b5d2b 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1502,8 +1502,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, mmu_seq = vcpu->kvm->mmu_invalidate_seq; mmap_read_unlock(current->mm); - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, - write_fault, &writable, NULL); + pfn = __gfn_to_pfn_memslot(memslot, gfn, MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE, + false, NULL, write_fault, &writable, NULL); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(hva, vma_shift); return 0; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 7f765d5ad436..ab7caa86aa16 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -612,7 +612,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, write_ok = true; } else { /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, + pfn = __gfn_to_pfn_memslot(memslot, gfn, MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC, + false, NULL, writing, 
&write_ok, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 572707858d65..3fa05c8e96b0 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -846,7 +846,8 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long pfn; /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, + pfn = __gfn_to_pfn_memslot(memslot, gfn, MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC, + false, NULL, writing, upgrade_p, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index deae8ac74d9a..43516eb50e06 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4297,8 +4297,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault } async = false; - fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async, - fault->write, &fault->map_writable, + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE, + false, &async, fault->write, &fault->map_writable, &fault->hva); if (!async) return RET_PF_CONTINUE; /* *pfn has correct page already */ @@ -4319,8 +4319,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault * to wait for IO. Note, gup always bails if it is unable to quickly * get a page and a fatal signal, i.e. SIGKILL, is pending. 
*/ - fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL, - fault->write, &fault->map_writable, + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE, + true, NULL, fault->write, &fault->map_writable, &fault->hva); return RET_PF_CONTINUE; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index db5c3eae58fe..fdd386e1d3c0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1188,8 +1188,20 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); +enum memslot_access_atomicity { + /* Force atomic access */ + MEMSLOT_ACCESS_ATOMIC, + /* + * Ask for non-atomic access, but allow upgrading to atomic depending + * on the memslot + */ + MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE, + /* Force non-atomic access */ + MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC +}; kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, - bool atomic, bool interruptible, bool *async, + enum memslot_access_atomicity atomicity, + bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva); void kvm_release_pfn_clean(kvm_pfn_t pfn); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index aa81e41b1488..d4f4ccb29e6d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2735,9 +2735,11 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible, } kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, - bool atomic, bool interruptible, bool *async, + enum memslot_access_atomicity atomicity, + bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva) { + bool atomic; unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); if (hva) @@ -2759,6 +2761,23 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct 
kvm_memory_slot *slot, gfn_t gfn, writable = NULL; } + if (atomicity == MEMSLOT_ACCESS_ATOMIC) { + atomic = true; + } else if (atomicity == MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE) { + atomic = false; + if (kvm_is_slot_userfault_on_missing(slot)) { + atomic = true; + if (async) { + *async = false; + async = NULL; + } + } + } else if (atomicity == MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC) { + atomic = false; + } else { + BUG(); + } + return hva_to_pfn(addr, atomic, interruptible, async, write_fault, writable); } @@ -2767,22 +2786,23 @@ EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable) { - return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false, - NULL, write_fault, writable, NULL); + return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, + MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC, + false, NULL, write_fault, writable, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { - return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true, - NULL, NULL); + return __gfn_to_pfn_memslot(slot, gfn, MEMSLOT_ACCESS_NONATOMIC_MAY_UPGRADE, + false, NULL, true, NULL, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) { - return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true, - NULL, NULL); + return __gfn_to_pfn_memslot(slot, gfn, MEMSLOT_ACCESS_ATOMIC, + false, NULL, true, NULL, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); @@ -2862,7 +2882,9 @@ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) if (!map) return -EINVAL; - pfn = gfn_to_pfn(vcpu->kvm, gfn); + pfn = __gfn_to_pfn_memslot(gfn_to_memslot(vcpu->kvm, gfn), gfn, + MEMSLOT_ACCESS_FORCE_ALLOW_NONATOMIC, + false, NULL, true, NULL, NULL); if (is_error_noslot_pfn(pfn)) return -EINVAL; -- 2.42.0.283.g2d96d420d3-goog