Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
duplicated code at call sites and make the code more readable.

Signed-off-by: David Matlack <dmatlack@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
 arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
 3 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..eccd889d20a5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3545,7 +3545,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		return reserved;
 	}
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 	else
 		leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3729,7 +3729,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
 
-	if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+	if (!is_vcpu_using_tdp_mmu(vcpu)) {
 		r = fast_page_fault(vcpu, gpa, error_code);
 		if (r != RET_PF_INVALID)
 			return r;
@@ -3751,7 +3751,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 	r = RET_PF_RETRY;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		read_lock(&vcpu->kvm->mmu_lock);
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
@@ -3762,7 +3762,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (r)
 		goto out_unlock;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
 				    pfn, prefault);
 	else
@@ -3770,7 +3770,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 				 prefault, is_tdp);
 
 out_unlock:
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		read_unlock(&vcpu->kvm->mmu_lock);
 	else
 		write_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 237317b1eddd..f4cc79dabeae 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -979,7 +979,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
-	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
+	if (WARN_ON(!is_vcpu_using_tdp_mmu(vcpu)))
 		return RET_PF_RETRY;
 
 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..c8cf12809fcf 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -91,16 +91,18 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 #endif
 
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *sp;
+	hpa_t root_hpa = vcpu->arch.mmu->root_hpa;
 
 	if (!is_tdp_mmu_enabled(kvm))
 		return false;
-	if (WARN_ON(!VALID_PAGE(hpa)))
+	if (WARN_ON(!VALID_PAGE(root_hpa)))
 		return false;
 
-	sp = to_shadow_page(hpa);
+	sp = to_shadow_page(root_hpa);
 	if (WARN_ON(!sp))
 		return false;
 
--
2.32.0.272.g935e593368-goog
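
For reference, the renamed helper would read roughly as below once this
patch is applied. This is a sketch assembled from the tdp_mmu.h hunk
above; the trailing return statement falls outside the hunk's context
lines and is assumed to carry over unchanged from is_tdp_mmu_root():

	static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		struct kvm_mmu_page *sp;
		hpa_t root_hpa = vcpu->arch.mmu->root_hpa;

		/* The TDP MMU must be enabled for this VM at all. */
		if (!is_tdp_mmu_enabled(kvm))
			return false;
		/* The vCPU must have a valid root loaded. */
		if (WARN_ON(!VALID_PAGE(root_hpa)))
			return false;

		sp = to_shadow_page(root_hpa);
		if (WARN_ON(!sp))
			return false;

		/* Assumed tail, not visible in the hunk above. */
		return is_tdp_mmu_page(sp) && sp->root_count;
	}

Call sites that previously passed (vcpu->kvm, vcpu->arch.mmu->root_hpa)
now just pass the vcpu, which is what motivates the rename.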