Add a helper, is_transparent_hugepage(), to explicitly check whether a
compound page is a THP and use it when populating KVM's secondary MMU.
The explicit check fixes a bug where a remapped compound page, e.g. for
an XDP Rx socket, is mapped into a KVM guest and is mistaken for a THP,
which results in KVM incorrectly creating a huge page in its secondary
MMU.

Fixes: 936a5fe6e6148 ("thp: kvm mmu transparent hugepage support")
Reported-by: syzbot+c9d1fb51ac9d0d10c39d@xxxxxxxxxxxxxxxxxxxxxxxxx
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c   |  4 ++--
 include/linux/huge_mm.h  |  6 ++++++
 include/linux/kvm_host.h |  1 +
 mm/huge_memory.c         | 11 +++++++++++
 virt/kvm/arm/mmu.c       |  8 +-------
 virt/kvm/kvm_main.c      | 10 ++++++++++
 6 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8e822c09170d..ca14c84c4f4b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3344,7 +3344,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 */
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-	    PageTransCompoundMap(pfn_to_page(pfn))) {
+	    kvm_is_transparent_hugepage(pfn)) {
 		unsigned long mask;
 
 		/*
@@ -5959,7 +5959,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	 */
 	if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
 	    !kvm_is_zone_device_pfn(pfn) &&
-	    PageTransCompoundMap(pfn_to_page(pfn))) {
+	    kvm_is_transparent_hugepage(pfn)) {
 		pte_list_remove(rmap_head, sptep);
 
 		if (kvm_available_flush_tlb_with_range())
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 93d5cf0bc716..5e154fad2f98 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,6 +160,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
 
 extern void prep_transhuge_page(struct page *page);
 extern void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
 
 bool can_split_huge_page(struct page *page, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -310,6 +311,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 
 static inline void prep_transhuge_page(struct page *page) {}
 
+static inline bool is_transparent_hugepage(struct page *page)
+{
+	return false;
+}
+
 #define transparent_hugepage_flags 0UL
 
 #define thp_get_unmapped_area	NULL
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 339de08e5fa2..411b71a02f25 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -985,6 +985,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 13cc93785006..94c85a5da041 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -527,6 +527,17 @@ void prep_transhuge_page(struct page *page)
 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
 
+bool is_transparent_hugepage(struct page *page)
+{
+	if (!PageCompound(page))
+		return false;
+
+	page = compound_head(page);
+	return is_huge_zero_page(page) ||
+	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
+}
+EXPORT_SYMBOL_GPL(is_transparent_hugepage);
+
 static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
 		loff_t off, unsigned long flags, unsigned long size)
 {
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 38b4c910b6c3..6e29d0c5062c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1372,14 +1372,8 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(pfn);
 
-	/*
-	 * PageTransCompoundMap() returns true for THP and
-	 * hugetlbfs. Make sure the adjustment is done only for THP
-	 * pages.
-	 */
-	if (!PageHuge(page) && PageTransCompoundMap(page)) {
+	if (kvm_is_transparent_hugepage(pfn)) {
 		unsigned long mask;
 		/*
 		 * The address we faulted on is backed by a transparent huge
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3aa21bec028d..e8ca8bf12320 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -191,6 +191,16 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 	return true;
 }
 
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (!PageTransCompoundMap(page))
+		return false;
+
+	return is_transparent_hugepage(compound_head(page));
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-- 
2.24.1
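
For readers without the tree handy, here is a minimal standalone sketch
(userspace C, not kernel code) of the distinction the patch draws. The
mock_page struct and the old_check()/new_check() helpers are invented
stand-ins for illustration: the old PageTransCompoundMap()-based test
accepts any mapped compound page, while the new helper also requires the
THP destructor, mirroring the page[1].compound_dtor == TRANSHUGE_PAGE_DTOR
test above (the real helper additionally accepts the huge zero page).
That destructor is what separates a genuine THP from a remapped compound
page such as an XDP Rx socket's.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's compound-page destructor IDs. */
enum compound_dtor { COMPOUND_PAGE_DTOR, TRANSHUGE_PAGE_DTOR };

/* Simplified stand-in for struct page; not the real layout. */
struct mock_page {
	bool compound;           /* models PageTransCompoundMap()    */
	enum compound_dtor dtor; /* models page[1].compound_dtor     */
};

/* Old logic: any mapped compound page is assumed to be a THP. */
static bool old_check(const struct mock_page *p)
{
	return p->compound;
}

/*
 * New logic: a THP must also carry TRANSHUGE_PAGE_DTOR, which
 * prep_transhuge_page() sets only for true transparent huge pages.
 */
static bool new_check(const struct mock_page *p)
{
	return p->compound && p->dtor == TRANSHUGE_PAGE_DTOR;
}

int main(void)
{
	struct mock_page thp = { true, TRANSHUGE_PAGE_DTOR }; /* real THP */
	struct mock_page xdp = { true, COMPOUND_PAGE_DTOR };  /* remapped compound page */

	printf("THP page: old=%d new=%d\n", old_check(&thp), new_check(&thp));
	printf("XDP page: old=%d new=%d\n", old_check(&xdp), new_check(&xdp));
	return 0;
}

Compiled and run, the old check reports both pages as THPs (old=1 for
both), while the new check rejects the remapped compound page (new=0),
which is exactly the misclassification the patch fixes in KVM's
secondary MMU.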