The patch titled
     Subject: mm: shmem: remove __shmem_huge_global_enabled()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-shmem-remove-__shmem_huge_global_enabled.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-shmem-remove-__shmem_huge_global_enabled.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Subject: mm: shmem: remove __shmem_huge_global_enabled()
Date: Thu, 17 Oct 2024 22:14:57 +0800

Remove __shmem_huge_global_enabled() since it has only one caller, and
drop the repeated VM_NOHUGEPAGE/MMF_DISABLE_THP checks, as they are
already checked in shmem_allowable_huge_orders().  Also remove the now
unnecessary vma parameter.

Link: https://lkml.kernel.org/r/20241017141457.1169092-2-wangkefeng.wang@xxxxxxxxxx
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Reviewed-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/shmem.c |   33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)

--- a/mm/shmem.c~mm-shmem-remove-__shmem_huge_global_enabled
+++ a/mm/shmem.c
@@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct ad
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					loff_t write_end, bool shmem_huge_force,
-					struct vm_area_struct *vma,
-					unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+				      loff_t write_end, bool shmem_huge_force,
+				      unsigned long vm_flags)
 {
-	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
 	loff_t i_size;
 
-	if (!S_ISREG(inode->i_mode))
+	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
-	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
+	if (!S_ISREG(inode->i_mode))
 		return false;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
@@ -576,7 +574,7 @@ static bool __shmem_huge_global_enabled(
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
-		if (mm && (vm_flags & VM_HUGEPAGE))
+		if (vm_flags & VM_HUGEPAGE)
 			return true;
 		fallthrough;
 	default:
@@ -584,17 +582,6 @@ static bool __shmem_huge_global_enabled(
 	}
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-				      loff_t write_end, bool shmem_huge_force,
-				      struct vm_area_struct *vma, unsigned long vm_flags)
-{
-	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-		return false;
-
-	return __shmem_huge_global_enabled(inode, index, write_end,
-					   shmem_huge_force, vma, vm_flags);
-}
-
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
 {
@@ -772,8 +759,8 @@ static unsigned long shmem_unused_huge_s
 }
 
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-		loff_t write_end, bool shmem_huge_force,
-		struct vm_area_struct *vma, unsigned long vm_flags)
+		loff_t write_end, bool shmem_huge_force,
+		unsigned long vm_flags)
 {
 	return false;
 }
@@ -1170,7 +1157,7 @@ static int shmem_getattr(struct mnt_idma
 	generic_fillattr(idmap, request_mask, inode, stat);
 	inode_unlock_shared(inode);
 
-	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+	if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
 		stat->blksize = HPAGE_PMD_SIZE;
 
 	if (request_mask & STATX_BTIME) {
@@ -1687,7 +1674,7 @@ unsigned long shmem_allowable_huge_order
 		return 0;
 
 	global_huge = shmem_huge_global_enabled(inode, index, write_end,
-						shmem_huge_force, vma, vm_flags);
+						shmem_huge_force, vm_flags);
 	if (!vma || !vma_is_anon_shmem(vma)) {
 		/*
 		 * For tmpfs, we now only support PMD sized THP if huge page
_

Patches currently in -mm which might be from wangkefeng.wang@xxxxxxxxxx are

mm-remove-unused-hugepage-for-vma_alloc_folio.patch
tmpfs-dont-enable-large-folios-if-not-supported.patch
mm-huge_memory-move-file_thp_enabled-into-huge_memoryc.patch
mm-shmem-remove-__shmem_huge_global_enabled.patch
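
For convenience, below is a sketch of how shmem_huge_global_enabled()
reads once the patch above is applied, reconstructed from the hunks.
The SHMEM_HUGE_FORCE check and the start of the switch on the huge=
mount option sit between the hunks; they are unchanged by the patch and
are only summarized in a comment here:

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
				      unsigned long vm_flags)
{
	loff_t i_size;

	/*
	 * Folded in from the removed wrapper: bail out when a PMD-sized
	 * folio would exceed what the page cache supports.
	 */
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;

	/* ... force/always/within_size handling, unchanged by this patch ... */

	case SHMEM_HUGE_ADVISE:
		/*
		 * The mm-based VM_NOHUGEPAGE/MMF_DISABLE_THP checks are
		 * gone; shmem_allowable_huge_orders() already performs them.
		 */
		if (vm_flags & VM_HUGEPAGE)
			return true;
		fallthrough;
	default:
		return false;
	}
}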