The patch titled
     Subject: mm: convert hugetlb_page_mapping_lock_write() to hugetlb_folio_mapping_lock_write
has been added to the -mm mm-unstable branch.  Its filename is
     mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write.patch

This patch will later appear in the mm-unstable branch at
     git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Subject: mm: convert hugetlb_page_mapping_lock_write() to hugetlb_folio_mapping_lock_write
Date: Wed Apr 24 04:20:30 PM PDT 2024

Convert this to use folios, so we can remove page_mapping().

Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/hugetlb.h |    6 +++---
 mm/hugetlb.c            |    6 +++---
 mm/memory-failure.c     |    4 ++--
 mm/migrate.c            |    2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

--- a/mm/hugetlb.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/hugetlb.c
@@ -2155,13 +2155,13 @@ static bool prep_compound_gigantic_folio
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (!mapping)
 		return mapping;
--- a/mm/memory-failure.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/memory-failure.c
@@ -1595,7 +1595,7 @@ static bool hwpoison_user_mappings(struc
 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
-	mapping = page_mapping(hpage);
+	mapping = folio_mapping(folio);
 	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_can_writeback(mapping)) {
 		if (page_mkclean(hpage)) {
@@ -1622,7 +1622,7 @@ static bool hwpoison_user_mappings(struc
 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
 		 * at this higher level.
 		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
+		mapping = hugetlb_folio_mapping_lock_write(folio);
 		if (mapping) {
 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 			i_mmap_unlock_write(mapping);
--- a/include/linux/hugetlb.h~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/include/linux/hugetlb.h
@@ -178,7 +178,7 @@ bool hugetlbfs_pagecache_present(struct
 			struct vm_area_struct *vma,
 			unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -297,8 +297,8 @@ static inline unsigned long hugetlb_tota
 	return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-					struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+					struct folio *folio)
 {
 	return NULL;
 }
--- a/mm/migrate.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/migrate.c
@@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_
 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
 			 * to let lower levels know we have taken the lock.
 			 */
-			mapping = hugetlb_page_mapping_lock_write(&src->page);
+			mapping = hugetlb_folio_mapping_lock_write(src);
 			if (unlikely(!mapping))
 				goto unlock_put_anon;
 
_
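A note for readers following the conversion: the mm/hugetlb.c hunk above
ends at its context boundary, before the tail of the function.  Going by
the "we can only trylock_write" locking comment, the converted helper
presumably completes along the lines of the sketch below; the trylock
tail is inferred from that comment, not quoted from the tree.

/*
 * Sketch of the complete converted helper.  Everything after the
 * NULL-mapping check is an assumption based on the locking comment
 * in the hunk above, not a verbatim excerpt.
 */
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (!mapping)
		return mapping;

	/*
	 * i_mmap_rwsem nests inside the folio lock, so a blocking
	 * acquisition here could deadlock: trylock instead and let
	 * the caller handle failure.
	 */
	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}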
Patches currently in -mm which might be from akpm@xxxxxxxxxxxxxxxxxxxx are

selftests-harness-remove-use-of-line_max-fix.patch
selftests-harness-remove-use-of-line_max-fix-fix.patch
mm-sparc-change-pxd_huge-behavior-to-exclude-swap-entries-fix.patch
mm-hold-ptl-from-the-first-pte-while-reclaiming-a-large-folio-fix.patch
sh-remove-use-of-pg_arch_1-on-individual-pages-fix.patch
mm-gup-drop-folio_fast_pin_allowed-in-hugepd-processing-fix.patch
mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.patch
arm-mm-drop-vm_fault_badmap-vm_fault_badaccess-checkpatch-fixes.patch
mm-hugetlb-rename-dissolve_free_huge_pages-to-dissolve_free_hugetlb_folios-fix.patch
mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write.patch
__mod_memcg_lruvec_state-enhance-diagnostics.patch
__mod_memcg_lruvec_state-enhance-diagnostics-fix.patch
binfmt_elf_fdpic-fix-proc-pid-auxv-checkpatch-fixes.patch
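For context on the call sites touched by the patch: both
hwpoison_user_mappings() and unmap_and_move_huge_page() wrap the renamed
helper in the same trylock-then-unmap pattern.  Condensed from the
mm/memory-failure.c hunk above (illustrative only, not a verbatim
excerpt):

	mapping = hugetlb_folio_mapping_lock_write(folio);
	if (mapping) {
		/*
		 * i_mmap_rwsem is already held in write mode, so tell
		 * the rmap walk not to take it again.
		 */
		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
		i_mmap_unlock_write(mapping);
	}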