The patch titled
     Subject: mm/shmem: fix shmem_split_large_entry()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-shmem-use-xas_try_split-in-shmem_split_large_entry-fix.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-shmem-use-xas_try_split-in-shmem_split_large_entry-fix.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Zi Yan <ziy@xxxxxxxxxx>
Subject: mm/shmem: fix shmem_split_large_entry()
Date: Tue, 25 Feb 2025 12:03:34 -0500

The swap entry offset was updated incorrectly.  Fix it by re-setting the
swap entries inside the split loop, with each offset computed relative to
the start of the original large entry.

Link: https://lkml.kernel.org/r/AF487A7A-F685-485D-8D74-756C843D6F0A@xxxxxxxxxx
Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
Cc: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Kairui Song <kasong@xxxxxxxxxxx>
Cc: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: Yang Shi <yang@xxxxxxxxxxxxxxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/shmem.c |   41 ++++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)

--- a/mm/shmem.c~mm-shmem-use-xas_try_split-in-shmem_split_large_entry-fix
+++ a/mm/shmem.c
@@ -2162,7 +2162,7 @@ static int shmem_split_large_entry(struc
 {
         struct address_space *mapping = inode->i_mapping;
         XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
-        int split_order = 0;
+        int split_order = 0, entry_order = 0;
         int i;
 
         /* Convert user data gfp flags to xarray node gfp flags */
@@ -2180,35 +2180,46 @@ static int shmem_split_large_entry(struc
                 }
 
                 order = xas_get_order(&xas);
+                entry_order = order;
 
                 /* Try to split large swap entry in pagecache */
                 if (order > 0) {
                         int cur_order = order;
+                        pgoff_t swap_index = round_down(index, 1 << order);
 
                         split_order = xas_try_split_min_order(cur_order);
 
                         while (cur_order > 0) {
+                                pgoff_t aligned_index =
+                                        round_down(index, 1 << cur_order);
+                                pgoff_t swap_offset = aligned_index - swap_index;
+
                                 xas_set_order(&xas, index, split_order);
                                 xas_try_split(&xas, old, cur_order, GFP_NOWAIT);
                                 if (xas_error(&xas))
                                         goto unlock;
+
+                                /*
+                                 * Re-set the swap entry after splitting, and
+                                 * the swap offset of the original large entry
+                                 * must be continuous.
+                                 */
+                                for (i = 0; i < 1 << cur_order;
+                                     i += (1 << split_order)) {
+                                        swp_entry_t tmp;
+
+                                        tmp = swp_entry(swp_type(swap),
+                                                        swp_offset(swap) +
+                                                        swap_offset +
+                                                        i);
+                                        __xa_store(&mapping->i_pages,
+                                                   aligned_index + i,
+                                                   swp_to_radix_entry(tmp), 0);
+                                }
                                 cur_order = split_order;
                                 split_order =
                                         xas_try_split_min_order(split_order);
                         }
-
-                        /*
-                         * Re-set the swap entry after splitting, and the swap
-                         * offset of the original large entry must be continuous.
-                         */
-                        for (i = 0; i < 1 << order; i++) {
-                                pgoff_t aligned_index = round_down(index, 1 << order);
-                                swp_entry_t tmp;
-
-                                tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
-                                __xa_store(&mapping->i_pages, aligned_index + i,
-                                           swp_to_radix_entry(tmp), 0);
-                        }
                 }
 
 unlock:
@@ -2221,7 +2232,7 @@ unlock:
         if (xas_error(&xas))
                 return xas_error(&xas);
 
-        return split_order;
+        return entry_order;
 }
 
 /*
_

Patches currently in -mm which might be from ziy@xxxxxxxxxx are

selftests-mm-make-file-backed-thp-split-work-by-writing-pmd-size-data.patch
mm-huge_memory-allow-split-shmem-large-folio-to-any-lower-order.patch
selftests-mm-test-splitting-file-backed-thp-to-any-lower-order.patch
xarray-add-xas_try_split-to-split-a-multi-index-entry.patch
mm-huge_memory-add-two-new-not-yet-used-functions-for-folio_split.patch
mm-huge_memory-move-folio-split-common-code-to-__folio_split.patch
mm-huge_memory-add-buddy-allocator-like-non-uniform-folio_split.patch
mm-huge_memory-remove-the-old-unused-__split_huge_page.patch
mm-huge_memory-add-folio_split-to-debugfs-testing-interface.patch
mm-truncate-use-buddy-allocator-like-folio-split-for-truncate-operation.patch
selftests-mm-add-tests-for-folio_split-buddy-allocator-like-split.patch
mm-filemap-use-xas_try_split-in-__filemap_add_folio.patch
mm-shmem-use-xas_try_split-in-shmem_split_large_entry.patch
mm-shmem-use-xas_try_split-in-shmem_split_large_entry-fix.patch