When the memslot flag is enabled, fail guest memory accesses for which
fast-gup fails (ie, for which the mappings are not present).

Suggested-by: James Houghton <jthoughton@xxxxxxxxxx>
Signed-off-by: Anish Moorthy <amoorthy@xxxxxxxxxx>
---
 Documentation/virt/kvm/api.rst |  2 +-
 arch/x86/kvm/mmu/mmu.c         | 17 ++++++++++++-----
 arch/x86/kvm/x86.c             |  1 +
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 7967b9909e28b..452bbca800b15 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -7712,7 +7712,7 @@ reported to the maintainers.
 7.35 KVM_CAP_ABSENT_MAPPING_FAULT
 ---------------------------------
 
-:Architectures: None
+:Architectures: x86
 :Returns: -EINVAL.
 
 The presence of this capability indicates that userspace may pass the
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d83a3e1e3eff9..4aef79b97c985 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4218,7 +4218,9 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
 }
 
-static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu,
+			     struct kvm_page_fault *fault,
+			     bool fault_on_absent_mapping)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	bool async;
@@ -4251,9 +4253,12 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	}
 
 	async = false;
-	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
-					  fault->write, &fault->map_writable,
-					  &fault->hva);
+
+	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn,
+					  fault_on_absent_mapping, false,
+					  fault_on_absent_mapping ? NULL : &async,
+					  fault->write, &fault->map_writable, &fault->hva);
+
 	if (!async)
 		return RET_PF_CONTINUE; /* *pfn has correct page already */
 
@@ -4287,7 +4292,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	smp_rmb();
 
-	ret = __kvm_faultin_pfn(vcpu, fault);
+	ret = __kvm_faultin_pfn(vcpu, fault,
+				likely(fault->slot)
+				&& kvm_slot_fault_on_absent_mapping(fault->slot));
 	if (ret != RET_PF_CONTINUE)
 		return ret;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3e9deab31e1c8..bc465cde7acf6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4433,6 +4433,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
 	case KVM_CAP_MEMORY_FAULT_INFO:
+	case KVM_CAP_ABSENT_MAPPING_FAULT:
 		r = 1;
 		break;
 	case KVM_CAP_EXIT_HYPERCALL:
-- 
2.40.0.577.gac1e443424-goog
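
As an illustration only (not part of the patch): a minimal sketch of how
userspace might opt a memslot into this behavior. It assumes the uapi
definitions added elsewhere in this series; the memslot flag name and the
numeric values below are placeholders, not the real definitions.
KVM_CAP_ABSENT_MAPPING_FAULT is only advertised via KVM_CHECK_EXTENSION,
while the behavior itself is requested per-memslot.

	/* Illustrative sketch only; placeholder names/values, see above. */
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	#ifndef KVM_CAP_ABSENT_MAPPING_FAULT
	#define KVM_CAP_ABSENT_MAPPING_FAULT	0	/* placeholder value */
	#endif
	#ifndef KVM_MEM_ABSENT_MAPPING_FAULT
	#define KVM_MEM_ABSENT_MAPPING_FAULT	(1UL << 2)	/* placeholder name/value */
	#endif

	static int enable_absent_mapping_faults(int vm_fd, __u32 slot, __u64 gpa,
						__u64 size, void *hva)
	{
		struct kvm_userspace_memory_region region = {
			.slot = slot,
			.flags = KVM_MEM_ABSENT_MAPPING_FAULT,
			.guest_phys_addr = gpa,
			.memory_size = size,
			.userspace_addr = (__u64)(unsigned long)hva,
		};

		/* The capability only advertises support (KVM_CHECK_EXTENSION > 0). */
		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ABSENT_MAPPING_FAULT) <= 0)
			return -1;

		/* Request the fail-on-absent-mapping behavior for this slot. */
		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}

With the flag set, a guest access to a gfn whose host mapping is not present
is expected to be surfaced to userspace (see KVM_CAP_MEMORY_FAULT_INFO)
rather than being resolved through the slow gup path.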