This is similar to __migrate_folio(): use folio_mc_copy() in HugeTLB folio
migration to avoid a kernel panic when copying from a poisoned folio.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 fs/hugetlbfs/inode.c |  2 +-
 mm/migrate.c         | 10 ++++++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9456e1d55540..ecad73a4f713 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1128,7 +1128,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
 		hugetlb_set_folio_subpool(src, NULL);
 	}
 
-	folio_migrate_copy(dst, src);
+	folio_migrate_flags(dst, src);
 	return MIGRATEPAGE_SUCCESS;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index f9d700d82ea9..ad78b053815a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,10 +537,16 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct folio *dst, struct folio *src)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
-	int expected_count;
+	int rc, expected_count = folio_expected_refs(mapping, src);
+
+	if (folio_ref_count(src) != expected_count)
+		return -EAGAIN;
+
+	rc = folio_mc_copy(dst, src);
+	if (unlikely(rc))
+		return rc;
 
 	xas_lock_irq(&xas);
-	expected_count = folio_expected_refs(mapping, src);
 	if (!folio_ref_freeze(src, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
-- 
2.27.0
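
For reference, the early-copy pattern this patch borrows from __migrate_folio()
looks roughly like the sketch below (a simplified illustration, with callees
paraphrased and some steps elided; it is not a verbatim copy of the upstream
source). The key point is that folio_mc_copy() reports a copy failure, such as
machine-check poison in the source folio, as an error code before the refcount
is frozen and the mapping is committed, so migration can fail cleanly instead
of panicking mid-copy:

	/* Simplified sketch; not the exact upstream implementation. */
	static int __migrate_folio(struct address_space *mapping, struct folio *dst,
				   struct folio *src, void *src_private,
				   enum migrate_mode mode)
	{
		int rc, expected_count = folio_expected_refs(mapping, src);

		/* Bail out early if someone still holds unexpected extra references. */
		if (folio_ref_count(src) != expected_count)
			return -EAGAIN;

		/*
		 * Copy the data before touching the mapping: folio_mc_copy()
		 * uses a machine-check-safe copy and returns an error instead
		 * of panicking when the source folio is hardware-poisoned.
		 */
		rc = folio_mc_copy(dst, src);
		if (unlikely(rc))
			return rc;

		/* ... move the mapping and transfer private data, then ... */
		folio_migrate_flags(dst, src);
		return MIGRATEPAGE_SUCCESS;
	}

The hugetlb hunk above follows the same ordering: check the refcount, copy the
data with folio_mc_copy(), and only then freeze the refcount and update the
mapping, which is why hugetlbfs_migrate_folio() now only needs
folio_migrate_flags() instead of folio_migrate_copy().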