KVM: x86: Add an optional pin_pfn hook for pinning guest pages at fault time

Introduce an optional kvm_x86_ops.pin_pfn() hook so vendor code can pin
a pfn before it is mapped into the guest.  For memslots backed by
encrypted memory, direct_page_fault() now calls the hook (via the new
kvm_pin_pfn() helper) after huge-page adjustment; if pinning fails, the
fault path releases the pfn and returns without installing a mapping.

The hook is declared with KVM_X86_OP_NULL and invoked through
static_call(), matching the convention for optional ops; kvm_pin_pfn()
treats a missing hook, an error/noslot pfn, or a reserved pfn as
"nothing to pin" and succeeds.

Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  3 +++
 arch/x86/kvm/mmu/mmu.c             | 15 +++++++++++++++
 3 files changed, 19 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index d39e0de06be2..8efb43d92eef 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -88,6 +88,7 @@ KVM_X86_OP(set_tss_addr)
 KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
+KVM_X86_OP_NULL(pin_pfn)
 KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ec9830d2aabf..df11f1fb76de 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1418,6 +1418,9 @@ struct kvm_x86_ops {
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
 
+	bool (*pin_pfn)(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			kvm_pfn_t pfn, hva_t hva, bool write,
+			enum pg_level level);
 
 	bool (*has_wbinvd_exit)(void);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index db1feecd6fed..b94e5e71653e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4001,6 +4001,16 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 	       mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
 }
 
+static bool kvm_pin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn) ||
+	    !kvm_x86_ops.pin_pfn)
+		return true;
+
+	return static_call(kvm_x86_pin_pfn)(vcpu, fault->slot, fault->pfn,
+					    fault->hva, fault->write,
+					    fault->goal_level);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4035,6 +4045,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
 
+	if (memslot_is_encrypted(fault->slot) && !kvm_pin_pfn(vcpu, fault))
+		goto out_release;
+
 	if (is_tdp_mmu_fault)
 		read_lock(&vcpu->kvm->mmu_lock);
 	else
@@ -4057,6 +4070,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		read_unlock(&vcpu->kvm->mmu_lock);
 	else
 		write_unlock(&vcpu->kvm->mmu_lock);
+
+out_release:
 	kvm_release_pfn_clean(fault->pfn);
 	return r;
 }
-- 
2.32.0