The patch titled
     Subject: mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
has been added to the -mm mm-unstable branch.  Its filename is
     mm-shmem-change-shmem_huge_global_enabled-to-return-huge-order-bitmap.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-shmem-change-shmem_huge_global_enabled-to-return-huge-order-bitmap.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a reply-to-all
      to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Subject: mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
Date: Thu, 28 Nov 2024 15:40:40 +0800

Change the shmem_huge_global_enabled() to return the suitable huge order
bitmap, and return 0 if huge pages are not allowed.

This is a preparation for supporting various huge orders allocation of
tmpfs in the following patches.

No functional changes.

Link: https://lkml.kernel.org/r/9dce1cfad3e9c1587cf1a0ea782ddbebd0e92984.1732779148.git.baolin.wang@xxxxxxxxxxxxxxxxx
Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Cc: Daniel Gomez <da.gomez@xxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: Lance Yang <ioworker0@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/shmem.c |   40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

--- a/mm/shmem.c~mm-shmem-change-shmem_huge_global_enabled-to-return-huge-order-bitmap
+++ a/mm/shmem.c
@@ -554,37 +554,37 @@ static bool shmem_confirm_swap(struct ad
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-				      loff_t write_end, bool shmem_huge_force,
-				      unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+					      loff_t write_end, bool shmem_huge_force,
+					      unsigned long vm_flags)
 {
 	loff_t i_size;
 
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-		return false;
+		return 0;
 	if (!S_ISREG(inode->i_mode))
-		return false;
+		return 0;
 	if (shmem_huge == SHMEM_HUGE_DENY)
-		return false;
+		return 0;
 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
-		return true;
+		return BIT(HPAGE_PMD_ORDER);
 
 	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
-		return true;
+		return BIT(HPAGE_PMD_ORDER);
 	case SHMEM_HUGE_WITHIN_SIZE:
 		index = round_up(index + 1, HPAGE_PMD_NR);
 		i_size = max(write_end, i_size_read(inode));
 		i_size = round_up(i_size, PAGE_SIZE);
 		if (i_size >> PAGE_SHIFT >= index)
-			return true;
+			return BIT(HPAGE_PMD_ORDER);
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
 		if (vm_flags & VM_HUGEPAGE)
-			return true;
+			return BIT(HPAGE_PMD_ORDER);
 		fallthrough;
 	default:
-		return false;
+		return 0;
 	}
 }
 
@@ -779,11 +779,11 @@ static unsigned long shmem_unused_huge_s
 	return 0;
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-		loff_t write_end, bool shmem_huge_force,
-		unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+		loff_t write_end, bool shmem_huge_force,
+		unsigned long vm_flags)
 {
-	return false;
+	return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -1685,21 +1685,21 @@ unsigned long shmem_allowable_huge_order
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
-	bool global_huge;
+	unsigned int global_orders;
 	loff_t i_size;
 	int order;
 
 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
 		return 0;
 
-	global_huge = shmem_huge_global_enabled(inode, index, write_end,
-						shmem_huge_force, vm_flags);
+	global_orders = shmem_huge_global_enabled(inode, index, write_end,
+						shmem_huge_force, vm_flags);
 	if (!vma || !vma_is_anon_shmem(vma)) {
 		/*
 		 * For tmpfs, we now only support PMD sized THP if huge page
 		 * is enabled, otherwise fallback to order 0.
 		 */
-		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+		return global_orders;
 	}
 
 	/*
@@ -1732,7 +1732,7 @@ unsigned long shmem_allowable_huge_order
 	if (vm_flags & VM_HUGEPAGE)
 		mask |= READ_ONCE(huge_shmem_orders_madvise);
 
-	if (global_huge)
+	if (global_orders > 0)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);
 
 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
_

Patches currently in -mm which might be from baolin.wang@xxxxxxxxxxxxxxxxx are

mm-factor-out-the-order-calculation-into-a-new-helper.patch
mm-shmem-change-shmem_huge_global_enabled-to-return-huge-order-bitmap.patch
mm-shmem-add-large-folio-support-for-tmpfs.patch
mm-shmem-add-a-kernel-command-line-to-change-the-default-huge-policy-for-tmpfs.patch
docs-tmpfs-drop-fadvise-from-the-documentation.patch
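[Editor's note, not part of the patch: the key change above is that
shmem_huge_global_enabled() now returns a bitmap of allowed huge orders
(currently either BIT(HPAGE_PMD_ORDER) or 0) instead of a bool, so
shmem_allowable_huge_orders() can combine it directly with its other
per-order masks.  Below is a minimal, standalone C sketch of that bitmap
convention.  BIT(), PMD_ORDER and global_orders() are illustrative
stand-ins rather than the kernel definitions, and PMD_ORDER = 9 assumes
4 KB base pages with 2 MB PMD-sized huge pages.]

/*
 * Illustrative sketch only: models the "huge order bitmap" convention,
 * where bit N set means order-N (2^N base pages) allocations are allowed.
 */
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define PMD_ORDER	9	/* assumption: 4 KB pages, 2 MB PMD huge pages */

/* Hypothetical stand-in for shmem_huge_global_enabled(): after the
 * patch it returns an order bitmap (PMD order or empty), not a bool. */
static unsigned long global_orders(int huge_allowed)
{
	return huge_allowed ? BIT(PMD_ORDER) : 0;
}

int main(void)
{
	unsigned long mask = BIT(4) | BIT(6);	/* e.g. "always" orders */
	unsigned long inherit = BIT(PMD_ORDER);	/* e.g. "inherit" orders */

	/* Mirrors shmem_allowable_huge_orders(): fold in the inherit
	 * orders only when the global policy returned a non-empty bitmap. */
	if (global_orders(1) > 0)
		mask |= inherit;

	printf("allowed huge order bitmap: 0x%lx\n", mask);
	return 0;
}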