The quilt patch titled
     Subject: mm: shmem: remove __shmem_huge_global_enabled()
has been removed from the -mm tree.  Its filename was
     mm-shmem-remove-__shmem_huge_global_enabled.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Subject: mm: shmem: remove __shmem_huge_global_enabled()
Date: Thu, 17 Oct 2024 22:14:57 +0800

Remove __shmem_huge_global_enabled() since it has only one caller, and
remove the repeated VM_NOHUGEPAGE/MMF_DISABLE_THP checks, which are
already done in shmem_allowable_huge_orders(); also remove the now
unnecessary vma parameter.

Link: https://lkml.kernel.org/r/20241017141457.1169092-2-wangkefeng.wang@xxxxxxxxxx
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Reviewed-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/shmem.c |   33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)

--- a/mm/shmem.c~mm-shmem-remove-__shmem_huge_global_enabled
+++ a/mm/shmem.c
@@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct ad
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                        loff_t write_end, bool shmem_huge_force,
-                                        struct vm_area_struct *vma,
-                                        unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                                      loff_t write_end, bool shmem_huge_force,
+                                      unsigned long vm_flags)
 {
-        struct mm_struct *mm = vma ? vma->vm_mm : NULL;
         loff_t i_size;
 
-        if (!S_ISREG(inode->i_mode))
+        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                 return false;
-        if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
+        if (!S_ISREG(inode->i_mode))
                 return false;
         if (shmem_huge == SHMEM_HUGE_DENY)
                 return false;
@@ -576,7 +574,7 @@ static bool __shmem_huge_global_enabled(
                         return true;
                 fallthrough;
         case SHMEM_HUGE_ADVISE:
-                if (mm && (vm_flags & VM_HUGEPAGE))
+                if (vm_flags & VM_HUGEPAGE)
                         return true;
                 fallthrough;
         default:
@@ -584,17 +582,6 @@ static bool __shmem_huge_global_enabled(
         }
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                      loff_t write_end, bool shmem_huge_force,
-                                      struct vm_area_struct *vma, unsigned long vm_flags)
-{
-        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-                return false;
-
-        return __shmem_huge_global_enabled(inode, index, write_end,
-                                           shmem_huge_force, vma, vm_flags);
-}
-
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
 {
@@ -772,8 +759,8 @@ static unsigned long shmem_unused_huge_s
 }
 
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                loff_t write_end, bool shmem_huge_force,
-                struct vm_area_struct *vma, unsigned long vm_flags)
+                loff_t write_end, bool shmem_huge_force,
+                unsigned long vm_flags)
 {
         return false;
 }
@@ -1170,7 +1157,7 @@ static int shmem_getattr(struct mnt_idma
         generic_fillattr(idmap, request_mask, inode, stat);
         inode_unlock_shared(inode);
 
-        if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+        if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
                 stat->blksize = HPAGE_PMD_SIZE;
 
         if (request_mask & STATX_BTIME) {
@@ -1687,7 +1674,7 @@ unsigned long shmem_allowable_huge_order
                 return 0;
 
         global_huge = shmem_huge_global_enabled(inode, index, write_end,
-                                                shmem_huge_force, vma, vm_flags);
+                                                shmem_huge_force, vm_flags);
         if (!vma || !vma_is_anon_shmem(vma)) {
                 /*
                  * For tmpfs, we now only support PMD sized THP if huge page
_

Patches currently in -mm which might be from wangkefeng.wang@xxxxxxxxxx are

mm-use-aligned-address-in-clear_gigantic_page.patch
mm-use-aligned-address-in-copy_user_gigantic_page.patch
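
For readers who want the end state rather than the individual hunks, the single
remaining helper in mm/shmem.c reads roughly as below once the wrapper is folded
in.  The SHMEM_HUGE_FORCE check and the sbinfo huge switch between the
SHMEM_HUGE_DENY test and the SHMEM_HUGE_ADVISE case are unchanged context that
the diff does not show; they are reproduced here only as a sketch and may differ
from the tree in minor detail.

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                                      loff_t write_end, bool shmem_huge_force,
                                      unsigned long vm_flags)
{
        loff_t i_size;

        /* This check used to be the wrapper's only job. */
        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                return false;
        if (!S_ISREG(inode->i_mode))
                return false;
        if (shmem_huge == SHMEM_HUGE_DENY)
                return false;
        if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
                return true;

        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
                return true;
        case SHMEM_HUGE_WITHIN_SIZE:
                index = round_up(index + 1, HPAGE_PMD_NR);
                i_size = max(write_end, i_size_read(inode));
                i_size = round_up(i_size, PAGE_SIZE);
                if (i_size >> PAGE_SHIFT >= index)
                        return true;
                fallthrough;
        case SHMEM_HUGE_ADVISE:
                /*
                 * No more "mm &&" guard: callers pass vm_flags == 0 when there
                 * is no vma, and shmem_allowable_huge_orders() already filters
                 * VM_NOHUGEPAGE/MMF_DISABLE_THP (per the changelog above).
                 */
                if (vm_flags & VM_HUGEPAGE)
                        return true;
                fallthrough;
        default:
                return false;
        }
}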