Add a helper, kvm_is_hugepage_allowed(), to consolidate identical code
across transparent_hugepage_adjust() and kvm_mmu_zap_collapsible_spte().

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c35c6fb2635a..95163f3abb24 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3292,6 +3292,12 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	return -EFAULT;
 }
 
+static bool kvm_is_hugepage_allowed(kvm_pfn_t pfn)
+{
+	return !kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn) &&
+	       PageTransCompoundMap(pfn_to_page(pfn));
+}
+
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 					gfn_t gfn, kvm_pfn_t *pfnp,
 					int *levelp)
@@ -3305,9 +3311,8 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-	    PageTransCompoundMap(pfn_to_page(pfn)) &&
+	if (!is_noslot_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
+	    kvm_is_hugepage_allowed(pfn) &&
 	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
@@ -5914,9 +5919,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	 * the guest, and the guest page table is using 4K page size
 	 * mapping if the indirect sp has level = 1.
 	 */
-	if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
-	    !kvm_is_zone_device_pfn(pfn) &&
-	    PageTransCompoundMap(pfn_to_page(pfn))) {
+	if (sp->role.direct && kvm_is_hugepage_allowed(pfn)) {
 		pte_list_remove(rmap_head, sptep);
 
 		if (kvm_available_flush_tlb_with_range())
-- 
2.24.0