Similar to __migrate_folio(), use folio_mc_copy() in HugeTLB folio
migration to avoid a panic when copying from a poisoned folio.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 fs/hugetlbfs/inode.c |  2 +-
 mm/migrate.c         | 14 +++++++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6df794ed4066..1107e5aa8343 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1128,7 +1128,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
 		hugetlb_set_folio_subpool(src, NULL);
 	}
 
-	folio_migrate_copy(dst, src);
+	folio_migrate_flags(dst, src);
 
 	return MIGRATEPAGE_SUCCESS;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 28aa9da95781..e9b52a86f539 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -532,15 +532,19 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 		struct folio *dst, struct folio *src)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
-	int expected_count;
+	int ret, expected_count = folio_expected_refs(mapping, src);
 
-	xas_lock_irq(&xas);
-	expected_count = folio_expected_refs(mapping, src);
-	if (!folio_ref_freeze(src, expected_count)) {
-		xas_unlock_irq(&xas);
+	if (!folio_ref_freeze(src, expected_count))
 		return -EAGAIN;
+
+	ret = folio_mc_copy(dst, src);
+	if (unlikely(ret)) {
+		folio_ref_unfreeze(src, expected_count);
+		return ret;
 	}
 
+	xas_lock_irq(&xas);
+
 	dst->index = src->index;
 	dst->mapping = src->mapping;
 
-- 
2.27.0
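
Not part of the patch, just for context: a minimal sketch of how a
filesystem's ->migrate_folio() callback looks once
migrate_huge_page_move_mapping() performs the data copy internally.
The example_* function name is illustrative only; everything else is
the API used above.

/*
 * Illustrative sketch only: with this change the data copy happens
 * inside migrate_huge_page_move_mapping() via folio_mc_copy(), so a
 * machine check on a poisoned source folio is reported as an error
 * return instead of panicking the kernel. The callback then only
 * transfers flags/metadata with folio_migrate_flags().
 */
static int example_hugetlb_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;	/* -EAGAIN, or the folio_mc_copy() error */

	/* No second copy here: only folio flags are carried over. */
	folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}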