The patch titled
     Subject: mm/gup: handle hugetlb for no_page_table()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-gup-handle-hugetlb-for-no_page_table.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-gup-handle-hugetlb-for-no_page_table.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Peter Xu <peterx@xxxxxxxxxx>
Subject: mm/gup: handle hugetlb for no_page_table()
Date: Wed, 27 Mar 2024 11:23:27 -0400

no_page_table() is not yet used for hugetlb code paths.  Make it prepared.

The major difference here is hugetlb will return -EFAULT as long as page
cache does not exist, even if VM_SHARED.  See hugetlb_follow_page_mask().

Pass "address" into no_page_table() too, as hugetlb will need it.

Link: https://lkml.kernel.org/r/20240327152332.950956-9-peterx@xxxxxxxxxx
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Reviewed-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Andrew Jones <andrew.jones@xxxxxxxxx>
Cc: Aneesh Kumar K.V (IBM) <aneesh.kumar@xxxxxxxxxx>
Cc: Axel Rasmussen <axelrasmussen@xxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: James Houghton <jthoughton@xxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
Cc: Lorenzo Stoakes <lstoakes@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: "Mike Rapoport (IBM)" <rppt@xxxxxxxxxx>
Cc: Muchun Song <muchun.song@xxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/gup.c |   44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)

--- a/mm/gup.c~mm-gup-handle-hugetlb-for-no_page_table
+++ a/mm/gup.c
@@ -501,19 +501,27 @@ static inline void mm_set_has_pinned_fla
 #ifdef CONFIG_MMU
 
 static struct page *no_page_table(struct vm_area_struct *vma,
-		unsigned int flags)
+		unsigned int flags, unsigned long address)
 {
+	if (!(flags & FOLL_DUMP))
+		return NULL;
+
 	/*
-	 * When core dumping an enormous anonymous area that nobody
-	 * has touched so far, we don't want to allocate unnecessary pages or
+	 * When core dumping, we don't want to allocate unnecessary pages or
 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
 	 * then get_dump_page() will return NULL to leave a hole in the dump.
 	 * But we can only make this optimization where a hole would surely
 	 * be zero-filled if handle_mm_fault() actually did handle it.
 	 */
-	if ((flags & FOLL_DUMP) &&
-	    (vma_is_anonymous(vma) || !vma->vm_ops->fault))
+	if (is_vm_hugetlb_page(vma)) {
+		struct hstate *h = hstate_vma(vma);
+
+		if (!hugetlbfs_pagecache_present(h, vma, address))
+			return ERR_PTR(-EFAULT);
+	} else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
 		return ERR_PTR(-EFAULT);
+	}
+
 	return NULL;
 }
 
@@ -593,7 +601,7 @@ static struct page *follow_page_pte(stru
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	pte = ptep_get(ptep);
 	if (!pte_present(pte))
 		goto no_page;
@@ -685,7 +693,7 @@ no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
 		return NULL;
-	return no_page_table(vma, flags);
+	return no_page_table(vma, flags, address);
 }
 
 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
@@ -701,27 +709,27 @@ static struct page *follow_pmd_mask(stru
 	pmd = pmd_offset(pudp, address);
 	pmdval = pmdp_get_lockless(pmd);
 	if (pmd_none(pmdval))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	if (!pmd_present(pmdval))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	if (pmd_devmap(pmdval)) {
 		ptl = pmd_lock(mm, pmd);
 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
 		spin_unlock(ptl);
 		if (page)
 			return page;
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	}
 	if (likely(!pmd_trans_huge(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_present(*pmd))) {
 		spin_unlock(ptl);
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	}
 	if (unlikely(!pmd_trans_huge(*pmd))) {
 		spin_unlock(ptl);
@@ -752,17 +760,17 @@ static struct page *follow_pud_mask(stru
 
 	pud = pud_offset(p4dp, address);
 	if (pud_none(*pud))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	if (pud_devmap(*pud)) {
 		ptl = pud_lock(mm, pud);
 		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
 		spin_unlock(ptl);
 		if (page)
 			return page;
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	}
 	if (unlikely(pud_bad(*pud)))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	return follow_pmd_mask(vma, address, pud, flags, ctx);
 }
 
@@ -777,10 +785,10 @@ static struct page *follow_p4d_mask(stru
 	p4dp = p4d_offset(pgdp, address);
 	p4d = READ_ONCE(*p4dp);
 	if (!p4d_present(p4d))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	BUILD_BUG_ON(p4d_leaf(p4d));
 	if (unlikely(p4d_bad(p4d)))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 	return follow_pud_mask(vma, address, p4dp, flags, ctx);
 }
 
@@ -830,7 +838,7 @@ static struct page *follow_page_mask(str
 	pgd = pgd_offset(mm, address);
 
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return no_page_table(vma, flags);
+		return no_page_table(vma, flags, address);
 
 	return follow_p4d_mask(vma, address, pgd, flags, ctx);
 }
_

Patches currently in -mm which might be from peterx@xxxxxxxxxx are

mm-hmm-process-pud-swap-entry-without-pud_huge.patch
mm-gup-cache-p4d-in-follow_p4d_mask.patch
mm-gup-check-p4d-presence-before-going-on.patch
mm-x86-change-pxd_huge-behavior-to-exclude-swap-entries.patch
mm-sparc-change-pxd_huge-behavior-to-exclude-swap-entries.patch
mm-arm-use-macros-to-define-pmd-pud-helpers.patch
mm-arm-redefine-pmd_huge-with-pmd_leaf.patch
mm-arm64-merge-pxd_huge-and-pxd_leaf-definitions.patch
mm-powerpc-redefine-pxd_huge-with-pxd_leaf.patch
mm-gup-merge-pxd-huge-mapping-checks.patch
mm-treewide-replace-pxd_huge-with-pxd_leaf.patch
mm-treewide-remove-pxd_huge.patch
mm-arm-remove-pmd_thp_or_huge.patch
mm-document-pxd_leaf-api.patch
selftests-mm-run_vmtestssh-fix-hugetlb-mem-size-calculation.patch
mm-kconfig-config_pgtable_has_huge_leaves.patch
mm-hugetlb-declare-hugetlbfs_pagecache_present-non-static.patch
mm-make-hpage_pxd_-macros-even-if-thp.patch
mm-introduce-vma_pgtable_walk_beginend.patch
mm-arch-provide-pud_pfn-fallback.patch
mm-gup-drop-folio_fast_pin_allowed-in-hugepd-processing.patch
mm-gup-refactor-record_subpages-to-find-1st-small-page.patch
mm-gup-handle-hugetlb-for-no_page_table.patch
mm-gup-cache-pudp-in-follow_pud_mask.patch
mm-gup-handle-huge-pud-for-follow_pud_mask.patch
mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch
mm-gup-handle-hugepd-for-follow_page.patch
mm-gup-handle-hugetlb-in-the-generic-follow_page_mask-code.patch
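
A note for readers following the series: the behaviour the commit message
describes (a hugetlb VMA dumps as a hole whenever its page cache page does
not exist, even if VM_SHARED, while other file-backed VMAs with a ->fault
handler are still left to handle_mm_fault()) can be modelled outside the
kernel.  The sketch below is a hypothetical userspace approximation, not
kernel code: the struct and its booleans stand in for is_vm_hugetlb_page(),
vma_is_anonymous(), vma->vm_ops->fault and hugetlbfs_pagecache_present(),
and the integer return models NULL (0) versus ERR_PTR(-EFAULT).

#include <stdbool.h>
#include <stdio.h>

#define FOLL_DUMP 0x1 /* stand-in value, not the kernel's definition */
#define EFAULT    14

struct vma_model {
	bool is_hugetlb;            /* is_vm_hugetlb_page(vma) */
	bool is_anonymous;          /* vma_is_anonymous(vma) */
	bool has_fault_handler;     /* vma->vm_ops && vma->vm_ops->fault */
	bool hugetlb_cache_present; /* hugetlbfs_pagecache_present(h, vma, address) */
};

/* 0 models a NULL return (no page, not an error); -EFAULT models ERR_PTR(-EFAULT). */
static int no_page_table_model(const struct vma_model *vma, unsigned int flags)
{
	if (!(flags & FOLL_DUMP))
		return 0;

	if (vma->is_hugetlb)
		return vma->hugetlb_cache_present ? 0 : -EFAULT;

	if (vma->is_anonymous || !vma->has_fault_handler)
		return -EFAULT;

	return 0;
}

int main(void)
{
	/* Shared hugetlb mapping whose page cache page does not exist yet. */
	struct vma_model shared_hugetlb_no_cache = {
		.is_hugetlb = true,
		.hugetlb_cache_present = false,
	};
	/* Ordinary file-backed VMA with a ->fault handler. */
	struct vma_model file_backed = {
		.has_fault_handler = true,
	};

	printf("hugetlb, no page cache: %d\n",
	       no_page_table_model(&shared_hugetlb_no_cache, FOLL_DUMP));
	printf("file-backed w/ fault:   %d\n",
	       no_page_table_model(&file_backed, FOLL_DUMP));
	return 0;
}

Built with any C compiler, the first line prints -14 and the second prints
0, matching the commit message's point that hugetlb returns -EFAULT as soon
as the page cache is absent, while other faultable mappings fall through to
handle_mm_fault().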