The quilt patch titled
     Subject: mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders()
has been removed from the -mm tree.  Its filename was
     mm-shmem-move-shmem_huge_global_enabled-into-shmem_allowable_huge_orders.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Subject: mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders()
Date: Mon, 22 Jul 2024 13:43:19 +0800

Move shmem_huge_global_enabled() into shmem_allowable_huge_orders(), so
that shmem_allowable_huge_orders() can also be used to find the
allowable huge orders for tmpfs.  Moreover, shmem_huge_global_enabled()
can become static.  While we are at it, passing the vma instead of the
mm to shmem_huge_global_enabled() makes the code cleaner.

No functional changes.

Link: https://lkml.kernel.org/r/8e825146bb29ee1a1c7bd64d2968ff3e19be7815.1721626645.git.baolin.wang@xxxxxxxxxxxxxxxxx
Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Barry Song <21cnbao@xxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Lance Yang <ioworker0@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Zi Yan <ziy@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/shmem_fs.h |   12 +--------
 mm/huge_memory.c         |   12 ++-------
 mm/shmem.c               |   47 +++++++++++++++++++++++--------------
 3 files changed, 35 insertions(+), 36 deletions(-)

--- a/include/linux/shmem_fs.h~mm-shmem-move-shmem_huge_global_enabled-into-shmem_allowable_huge_orders
+++ a/include/linux/shmem_fs.h
@@ -111,21 +111,13 @@ extern void shmem_truncate_range(struct
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-				      struct mm_struct *mm, unsigned long vm_flags);
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge);
+				bool shmem_huge_force);
 #else
-static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-						      bool shmem_huge_force, struct mm_struct *mm,
-						      unsigned long vm_flags)
-{
-	return false;
-}
 static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge)
+				bool shmem_huge_force)
 {
 	return 0;
 }
--- a/mm/huge_memory.c~mm-shmem-move-shmem_huge_global_enabled-into-shmem_allowable_huge_orders
+++ a/mm/huge_memory.c
@@ -159,16 +159,10 @@ unsigned long __thp_vma_allowable_orders
 	 * Must be done before hugepage flags check since shmem has its
 	 * own flags.
	 */
-	if (!in_pf && shmem_file(vma->vm_file)) {
-		bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
-							vma->vm_pgoff, !enforce_sysfs,
-							vma->vm_mm, vm_flags);
-
-		if (!vma_is_anon_shmem(vma))
-			return global_huge ? orders : 0;
+	if (!in_pf && shmem_file(vma->vm_file))
 		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
-						   vma, vma->vm_pgoff, global_huge);
-	}
+						   vma, vma->vm_pgoff,
+						   !enforce_sysfs);
 
 	if (!vma_is_anonymous(vma)) {
 		/*
--- a/mm/shmem.c~mm-shmem-move-shmem_huge_global_enabled-into-shmem_allowable_huge_orders
+++ a/mm/shmem.c
@@ -549,9 +549,10 @@ static bool shmem_confirm_swap(struct ad
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
 static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					bool shmem_huge_force, struct mm_struct *mm,
+					bool shmem_huge_force, struct vm_area_struct *vma,
 					unsigned long vm_flags)
 {
+	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
 	loff_t i_size;
 
 	if (!S_ISREG(inode->i_mode))
@@ -581,15 +582,15 @@ static bool __shmem_huge_global_enabled(
 	}
 }
 
-bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-			       bool shmem_huge_force, struct mm_struct *mm,
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+			       bool shmem_huge_force, struct vm_area_struct *vma,
 			       unsigned long vm_flags)
 {
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
 
 	return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
-					   mm, vm_flags);
+					   vma, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
@@ -772,6 +773,13 @@ static unsigned long shmem_unused_huge_s
 {
 	return 0;
 }
+
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+				      bool shmem_huge_force, struct vm_area_struct *vma,
+				      unsigned long vm_flags)
+{
+	return false;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
@@ -1625,22 +1633,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_g
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge)
+				bool shmem_huge_force)
 {
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
-	unsigned long vm_flags = vma->vm_flags;
+	unsigned long vm_flags = vma ? vma->vm_flags : 0;
+	bool global_huge;
 	loff_t i_size;
 	int order;
 
-	if ((vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+	if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
 		return 0;
 
 	/* If the hardware/firmware marked hugepage support disabled. */
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
 		return 0;
 
+	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
+						vma, vm_flags);
+	if (!vma || !vma_is_anon_shmem(vma)) {
+		/*
+		 * For tmpfs, we now only support PMD sized THP if huge page
+		 * is enabled, otherwise fallback to order 0.
+		 */
+		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+	}
+
 	/*
 	 * Following the 'deny' semantics of the top level, force the huge
 	 * option off from all mounts.
@@ -2077,7 +2096,7 @@ static int shmem_get_folio_gfp(struct in
 	struct mm_struct *fault_mm;
 	struct folio *folio;
 	int error;
-	bool alloced, huge;
+	bool alloced;
 	unsigned long orders = 0;
 
 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
@@ -2150,14 +2169,8 @@ repeat:
 		return 0;
 	}
 
-	huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
-					 vma ? vma->vm_flags : 0);
-	/* Find hugepage orders that are allowed for anonymous shmem. */
-	if (vma && vma_is_anon_shmem(vma))
-		orders = shmem_allowable_huge_orders(inode, vma, index, huge);
-	else if (huge)
-		orders = BIT(HPAGE_PMD_ORDER);
-
+	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
+	orders = shmem_allowable_huge_orders(inode, vma, index, false);
 	if (orders > 0) {
 		gfp_t huge_gfp;
 
_
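As an aside for readers following the refactor: below is a minimal
standalone C sketch of the consolidated control flow.  The names
allowable_huge_orders(), global_huge_enabled() and have_anon_shmem_vma,
and their trivial stub bodies, are hypothetical placeholders rather
than kernel code; only the branch structure mirrors the new
shmem_allowable_huge_orders() above, where the global huge-page policy
check and the tmpfs PMD-only fallback now live inside the helper so
callers make a single call.

/*
 * Minimal standalone model (not kernel code) of the branch structure
 * the patch consolidates into shmem_allowable_huge_orders().
 * All helpers below are hypothetical stubs.
 */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_ORDER	9	/* x86-64: 2 MiB PMD / 4 KiB base pages */
#define BIT(n)		(1UL << (n))

/* Stub: stands in for the real global sysfs/mount "huge=" policy check. */
static bool global_huge_enabled(bool shmem_huge_force)
{
	return shmem_huge_force;	/* the force flag wins in this toy model */
}

static unsigned long allowable_huge_orders(bool have_anon_shmem_vma,
					   bool shmem_huge_force)
{
	/* The global knob is now consulted inside the helper itself... */
	bool global_huge = global_huge_enabled(shmem_huge_force);

	/*
	 * ...and the tmpfs case (no anonymous shmem vma) is folded in:
	 * PMD-sized THP if the global policy allows it, else order 0.
	 */
	if (!have_anon_shmem_vma)
		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;

	/* Anonymous shmem would go on to apply the per-size order masks. */
	return BIT(HPAGE_PMD_ORDER);	/* placeholder for that mask logic */
}

int main(void)
{
	/* Callers now make a single call instead of open-coding the branches. */
	printf("tmpfs, huge enabled:  %#lx\n", allowable_huge_orders(false, true));
	printf("tmpfs, huge disabled: %#lx\n", allowable_huge_orders(false, false));
	return 0;
}

Compiled as-is, it prints 0x200 (BIT(9), one 2 MiB order) for the
enabled tmpfs case and 0 for the disabled one, matching the
BIT(HPAGE_PMD_ORDER)-or-zero behaviour the tmpfs branch encodes.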
Patches currently in -mm which might be from baolin.wang@xxxxxxxxxxxxxxxxx are

mm-swap-extend-swap_shmem_alloc-to-support-batch-swap_map_shmem-flag-setting.patch
mm-shmem-extend-shmem_partial_swap_usage-to-support-large-folio-swap.patch
mm-filemap-use-xa_get_order-to-get-the-swap-entry-order.patch
mm-shmem-use-swap_free_nr-to-free-shmem-swap-entries.patch
mm-shmem-support-large-folio-allocation-for-shmem_replace_folio.patch
mm-shmem-support-large-folio-allocation-for-shmem_replace_folio-fix.patch
mm-shmem-support-large-folio-allocation-for-shmem_replace_folio-fix-fix.patch
mm-shmem-drop-folio-reference-count-using-nr_pages-in-shmem_delete_from_page_cache.patch
mm-shmem-split-large-entry-if-the-swapin-folio-is-not-large.patch
mm-shmem-split-large-entry-if-the-swapin-folio-is-not-large-fix-2.patch
mm-shmem-support-large-folio-swap-out.patch
mm-shmem-support-large-folio-swap-out-fix-2.patch
mm-khugepaged-expand-the-is_refcount_suitable-to-support-file-folios.patch
mm-khugepaged-use-the-number-of-pages-in-the-folio-to-check-the-reference-count.patch
mm-khugepaged-support-shmem-mthp-copy.patch
mm-khugepaged-support-shmem-mthp-collapse.patch
selftests-mm-support-shmem-mthp-collapse-testing.patch