This callback is used by the KVM MMU to check whether a #NPF was for a
private GPA or not.

Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/mmu/mmu.c             |  3 +--
 arch/x86/kvm/mmu/mmu_internal.h    | 40 +++++++++++++++++++++++++++---
 4 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index f530a550c092..efae987cdce0 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -132,6 +132,7 @@ KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
+KVM_X86_OP_OPTIONAL_RET0(fault_is_private);
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9317abffbf68..92539708f062 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1636,6 +1636,7 @@ struct kvm_x86_ops {
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
 	int (*private_mem_enabled)(struct kvm *kvm);
+	int (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);
 
 	bool (*has_wbinvd_exit)(void);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b3ffc61c668c..61a7c221b966 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5646,8 +5646,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 	}
 
 	if (r == RET_PF_INVALID) {
-		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
-					  lower_32_bits(error_code), false);
+		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false);
 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
 			return -EIO;
 	}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index e2f508db0b6e..04ea8da86510 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -230,6 +230,38 @@ struct kvm_page_fault {
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
+static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
+{
+	struct kvm_memory_slot *slot;
+	bool private_fault = false;
+	gfn_t gfn = gpa_to_gfn(gpa);
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		pr_debug("%s: no slot, GFN: 0x%llx\n", __func__, gfn);
+		goto out;
+	}
+
+	if (!kvm_slot_can_be_private(slot)) {
+		pr_debug("%s: slot is not private, GFN: 0x%llx\n", __func__, gfn);
+		goto out;
+	}
+
+	if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault) == 1)
+		goto out;
+
+	/*
+	 * Handling below is for UPM self-tests and guests that use
+	 * slot->shared_bitmap for encrypted access tracking.
+	 */
+	if (IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING))
+		private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);
+
+out:
+	pr_debug("%s: GFN: 0x%llx, private: %d\n", __func__, gfn, private_fault);
+	return private_fault;
+}
+
 /*
  * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
  * and of course kvm_mmu_do_page_fault().
@@ -261,13 +293,13 @@ enum {
 };
 
 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefetch)
+					u64 err, bool prefetch)
 {
 	bool is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault);
 	struct kvm_page_fault fault = {
 		.addr = cr2_or_gpa,
-		.error_code = err,
+		.error_code = lower_32_bits(err),
 		.exec = err & PFERR_FETCH_MASK,
 		.write = err & PFERR_WRITE_MASK,
 		.present = err & PFERR_PRESENT_MASK,
@@ -281,8 +313,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
 		.req_level = PG_LEVEL_4K,
 		.goal_level = PG_LEVEL_4K,
-		.is_private = IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) && is_tdp &&
-			      kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
+		.is_private = is_tdp && kvm_mmu_fault_is_private(vcpu->kvm,
+								 cr2_or_gpa, err),
 	};
 	int r;
 
-- 
2.25.1
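
For reference, a vendor backend implementing the new callback might look
something like the sketch below. This is not part of this patch: it assumes
SEV-SNP-style behavior where hardware reports encrypted (private) guest
accesses via a bit in the #NPF error code, and the PFERR_GUEST_ENC_MASK
name, bit position, and sev_snp_guest() helper are all illustrative
assumptions rather than confirmed interfaces.

/*
 * Illustrative sketch only, not part of this patch. Assumes hardware
 * sets an "encrypted access" bit in the #NPF error code for faults on
 * private GPAs; PFERR_GUEST_ENC_MASK and sev_snp_guest() are assumed
 * names here.
 */
#define PFERR_GUEST_ENC_MASK	BIT_ULL(34)

static int svm_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code,
				bool *private_fault)
{
	/* Return 0 so non-SNP guests fall back to common handling. */
	if (!sev_snp_guest(kvm))
		return 0;

	/*
	 * The hardware-provided error code is authoritative: returning 1
	 * tells kvm_mmu_fault_is_private() to use *private_fault as-is
	 * instead of falling through to the testing-specific handling.
	 */
	*private_fault = !!(error_code & PFERR_GUEST_ENC_MASK);

	return 1;
}

Since the op is declared with KVM_X86_OP_OPTIONAL_RET0, vendor modules that
don't implement it get a default return value of 0, in which case
kvm_mmu_fault_is_private() falls through to the
CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING handling above.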