Just do an atomic gfn_to_pfn_memslot() when the cap is enabled. Since
we don't have to deal with async page faults, the implementation is
even simpler than on x86.

Signed-off-by: Anish Moorthy <amoorthy@xxxxxxxxxx>
Acked-by: James Houghton <jthoughton@xxxxxxxxxx>
---
 arch/arm64/kvm/arm.c |  1 +
 arch/arm64/kvm/mmu.c | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 698787ed87e92..31bec7866c346 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -220,6 +220,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_VCPU_ATTRIBUTES:
 	case KVM_CAP_PTP_KVM:
 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
+	case KVM_CAP_MEM_FAULT_NOWAIT:
 		r = 1;
 		break;
 	case KVM_CAP_SET_GUEST_DEBUG2:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 01352f5838a00..964af7cd5f1c8 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1206,6 +1206,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
+	bool mem_fault_nowait;
 
 	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
 	write_fault = kvm_is_write_fault(vcpu);
@@ -1301,8 +1302,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 */
 	smp_rmb();
 
-	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
-				   write_fault, &writable, NULL);
+	mem_fault_nowait = memory_faults_enabled(vcpu->kvm);
+	pfn = __gfn_to_pfn_memslot(
+		memslot, gfn, mem_fault_nowait, false, NULL,
+		write_fault, &writable, NULL);
+
+	if (mem_fault_nowait && pfn == KVM_PFN_ERR_FAULT) {
+		vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+		vcpu->run->memory_fault.gpa = gfn << PAGE_SHIFT;
+		vcpu->run->memory_fault.size = vma_pagesize;
+		return -EFAULT;
+	}
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
-- 
2.39.1.581.gbfd45094c4-goog
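
For anyone wanting to exercise the cap from userspace, here is a
minimal, untested sketch of a VMM run loop consuming the new exit. It
assumes the UAPI from earlier in this series (KVM_CAP_MEM_FAULT_NOWAIT
already enabled via KVM_ENABLE_CAP, the kvm_run::memory_fault layout
above, and KVM_RUN returning -EFAULT alongside the exit). It resolves
the fault with MADV_POPULATE_WRITE purely for illustration; a real
post-copy VMM would fetch the missing pages from the migration source
here. vcpu_fd, the mmap'd kvm_run, and memslot setup are elided.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/*
 * Run-loop sketch: slot_hva is the userspace mapping backing the
 * memslot whose guest physical range starts at slot_gpa.
 */
static int run_vcpu(int vcpu_fd, struct kvm_run *run,
		    uint8_t *slot_hva, uint64_t slot_gpa)
{
	for (;;) {
		int ret = ioctl(vcpu_fd, KVM_RUN, 0);

		/*
		 * With this series, KVM_RUN fails with -EFAULT when the
		 * nowait fault resolution misses, but run->exit_reason
		 * still describes the fault; treat anything else as a
		 * hard error.
		 */
		if (ret < 0 && (errno != EFAULT ||
				run->exit_reason != KVM_EXIT_MEMORY_FAULT))
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MEMORY_FAULT:
			/*
			 * Demand-fetch the faulting range, then re-enter
			 * the guest. MADV_POPULATE_WRITE (Linux 5.14+)
			 * simply forces the backing pages in.
			 */
			if (madvise(slot_hva +
				    (run->memory_fault.gpa - slot_gpa),
				    run->memory_fault.size,
				    MADV_POPULATE_WRITE))
				return -1;
			break;
		default:
			return 0; /* hand all other exits to the caller */
		}
	}
}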