Zi Yan <ziy@xxxxxxxxxx> writes:

> On 8 May 2023, at 22:20, Huang Ying wrote:
>
>> After recent changes to the retrying and failure counting in
>> migrate_pages_batch(), it was found that it's unnecessary to count
>> retrying and failure for normal, large, and THP folios separately,
>> because the retrying and failure numbers of large folios are not used
>> directly.  So, in this patch, we simplify the retrying and failure
>> counting of large folios by counting the retries and failures of
>> normal and large folios together.  This reduces the line count.
>>
>> This is just a code cleanup; no functionality changes are expected.
>>
>> Signed-off-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
>> Cc: Xin Hao <xhao@xxxxxxxxxxxxxxxxx>
>> Cc: Zi Yan <ziy@xxxxxxxxxx>
>> Cc: Yang Shi <shy828301@xxxxxxxxx>
>> Cc: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
>> Cc: Oscar Salvador <osalvador@xxxxxxx>
>> Cc: Alistair Popple <apopple@xxxxxxxxxx>
>> ---
>>  mm/migrate.c | 103 +++++++++++++++++----------------------------------
>>  1 file changed, 35 insertions(+), 68 deletions(-)
>>
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 01cac26a3127..10709aed76d3 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -1614,11 +1614,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  		int nr_pass)
>>  {
>>  	int retry = 1;
>> -	int large_retry = 1;
>>  	int thp_retry = 1;
>>  	int nr_failed = 0;
>>  	int nr_retry_pages = 0;
>> -	int nr_large_failed = 0;
>>  	int pass = 0;
>>  	bool is_large = false;
>>  	bool is_thp = false;
>> @@ -1631,9 +1629,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
>>  			!list_empty(from) && !list_is_singular(from));
>>
>> -	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
>> +	for (pass = 0; pass < nr_pass && retry; pass++) {
>>  		retry = 0;
>> -		large_retry = 0;
>>  		thp_retry = 0;
>>  		nr_retry_pages = 0;
>>
>> @@ -1660,7 +1657,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  			 * list is processed.
>>  			 */
>>  			if (!thp_migration_supported() && is_thp) {
>> -				nr_large_failed++;
>> +				nr_failed++;
>>  				stats->nr_thp_failed++;
>>  				if (!try_split_folio(folio, split_folios)) {
>>  					stats->nr_thp_split++;
>> @@ -1688,38 +1685,33 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  				 * When memory is low, don't bother to try to migrate
>>  				 * other folios, move unmapped folios, then exit.
>>  				 */
>> -				if (is_large) {
>> -					nr_large_failed++;
>> -					stats->nr_thp_failed += is_thp;
>> -					/* Large folio NUMA faulting doesn't split to retry. */
>> -					if (!nosplit) {
>> -						int ret = try_split_folio(folio, split_folios);
>> -
>> -						if (!ret) {
>> -							stats->nr_thp_split += is_thp;
>> -							break;
>> -						} else if (reason == MR_LONGTERM_PIN &&
>> -							   ret == -EAGAIN) {
>> -							/*
>> -							 * Try again to split large folio to
>> -							 * mitigate the failure of longterm pinning.
>> -							 */
>> -							large_retry++;
>> -							thp_retry += is_thp;
>> -							nr_retry_pages += nr_pages;
>> -							/* Undo duplicated failure counting. */
>> -							nr_large_failed--;
>> -							stats->nr_thp_failed -= is_thp;
>> -							break;
>> -						}
>> +				nr_failed++;
>> +				stats->nr_thp_failed += is_thp;
>> +				/* Large folio NUMA faulting doesn't split to retry. */
>> +				if (is_large && !nosplit) {
>> +					int ret = try_split_folio(folio, split_folios);
>> +
>> +					if (!ret) {
>> +						stats->nr_thp_split += is_thp;
>> +						break;
>> +					} else if (reason == MR_LONGTERM_PIN &&
>> +						   ret == -EAGAIN) {
>> +						/*
>> +						 * Try again to split large folio to
>> +						 * mitigate the failure of longterm pinning.
>> +						 */
>> +						retry++;
>> +						thp_retry += is_thp;
>> +						nr_retry_pages += nr_pages;
>> +						/* Undo duplicated failure counting. */
>> +						nr_failed--;
>> +						stats->nr_thp_failed -= is_thp;
>> +						break;
>> +					}
>>  				}
>> -			} else {
>> -				nr_failed++;
>>  			}
>>
>>  			stats->nr_failed_pages += nr_pages + nr_retry_pages;
>>  			/* nr_failed isn't updated for not used */
>> -			nr_large_failed += large_retry;
>>  			stats->nr_thp_failed += thp_retry;
>>  			rc_saved = rc;
>>  			if (list_empty(&unmap_folios))
>> @@ -1727,12 +1719,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  			else
>>  				goto move;
>>  		case -EAGAIN:
>> -			if (is_large) {
>> -				large_retry++;
>> -				thp_retry += is_thp;
>> -			} else {
>> -				retry++;
>> -			}
>> +			retry++;
>> +			thp_retry += is_thp;
>>  			nr_retry_pages += nr_pages;
>>  			break;
>>  		case MIGRATEPAGE_SUCCESS:
>> @@ -1750,20 +1738,14 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  			 * removed from migration folio list and not
>>  			 * retried in the next outer loop.
>>  			 */
>> -			if (is_large) {
>> -				nr_large_failed++;
>> -				stats->nr_thp_failed += is_thp;
>> -			} else {
>> -				nr_failed++;
>> -			}
>> -
>> +			nr_failed++;
>> +			stats->nr_thp_failed += is_thp;
>>  			stats->nr_failed_pages += nr_pages;
>>  			break;
>>  		}
>>  	}
>>  }
>>  nr_failed += retry;
>> -nr_large_failed += large_retry;
>>  stats->nr_thp_failed += thp_retry;
>>  stats->nr_failed_pages += nr_retry_pages;
>>  move:
>> @@ -1771,17 +1753,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  	try_to_unmap_flush();
>>
>>  	retry = 1;
>> -	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
>> +	for (pass = 0; pass < nr_pass && retry; pass++) {
>>  		retry = 0;
>> -		large_retry = 0;
>>  		thp_retry = 0;
>>  		nr_retry_pages = 0;
>>
>>  		dst = list_first_entry(&dst_folios, struct folio, lru);
>>  		dst2 = list_next_entry(dst, lru);
>>  		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
>> -			is_large = folio_test_large(folio);
>> -			is_thp = is_large && folio_test_pmd_mappable(folio);
>> +			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
>
> Should this be part of patch 2? Or maybe just merge two patches?

OK.  Will merge 2 patches.
>>  			nr_pages = folio_nr_pages(folio);
>>
>>  			cond_resched();
>> @@ -1797,12 +1777,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  			 */
>>  			switch(rc) {
>>  			case -EAGAIN:
>> -				if (is_large) {
>> -					large_retry++;
>> -					thp_retry += is_thp;
>> -				} else {
>> -					retry++;
>> -				}
>> +				retry++;
>> +				thp_retry += is_thp;
>>  				nr_retry_pages += nr_pages;
>>  				break;
>>  			case MIGRATEPAGE_SUCCESS:
>> @@ -1810,13 +1786,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  				stats->nr_thp_succeeded += is_thp;
>>  				break;
>>  			default:
>> -				if (is_large) {
>> -					nr_large_failed++;
>> -					stats->nr_thp_failed += is_thp;
>> -				} else {
>> -					nr_failed++;
>> -				}
>> -
>> +				nr_failed++;
>> +				stats->nr_thp_failed += is_thp;
>>  				stats->nr_failed_pages += nr_pages;
>>  				break;
>>  			}
>> @@ -1825,14 +1796,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
>>  		}
>>  	}
>>  	nr_failed += retry;
>> -	nr_large_failed += large_retry;
>>  	stats->nr_thp_failed += thp_retry;
>>  	stats->nr_failed_pages += nr_retry_pages;
>>
>> -	if (rc_saved)
>> -		rc = rc_saved;
>> -	else
>> -		rc = nr_failed + nr_large_failed;
>> +	rc = rc_saved ? : nr_failed;
>>  out:
>>  	/* Cleanup remaining folios */
>>  	dst = list_first_entry(&dst_folios, struct folio, lru);
>> --
>> 2.39.2
>
> Otherwise LGTM. Reviewed-by: Zi Yan <ziy@xxxxxxxxxx>

Thanks!

Best Regards,
Huang, Ying
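
For readers who want to see the resulting counting scheme in isolation, here is a stand-alone sketch of it outside the kernel. This is not kernel code: the toy_folio structure, the pass/-EAGAIN behaviour, and the folio mix are invented for illustration, and only the shape of the loop mirrors migrate_pages_batch() after this cleanup, i.e. a single retry/nr_failed pair covers normal and large folios alike, with THP kept purely as a statistic.

/*
 * Minimal stand-alone model (not kernel code) of the unified retry and
 * failure counting: one retry counter and one nr_failed counter for
 * all folio sizes, with THP tracked only for statistics.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_FOLIOS	8
#define NR_PASS		3

struct toy_folio {
	bool is_thp;		/* pretend PMD-mappable large folio */
	bool migrated;
	int eagain_left;	/* passes that still fail with -EAGAIN */
};

int main(void)
{
	struct toy_folio folios[NR_FOLIOS];
	int nr_failed = 0, nr_thp_failed = 0;
	int retry = 1, thp_retry = 0;

	/* A mix of normal and "THP" folios, some needing retries. */
	for (int i = 0; i < NR_FOLIOS; i++) {
		folios[i].is_thp = (i % 4 == 3);
		folios[i].migrated = false;
		folios[i].eagain_left = i % 4;
	}

	for (int pass = 0; pass < NR_PASS && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		for (int i = 0; i < NR_FOLIOS; i++) {
			struct toy_folio *f = &folios[i];

			if (f->migrated)
				continue;
			if (f->eagain_left-- > 0) {
				/* One counter pair for all folio sizes. */
				retry++;
				thp_retry += f->is_thp;
				continue;
			}
			f->migrated = true;
		}
	}
	/* Folios still pending after the last pass count as failures. */
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	printf("nr_failed=%d nr_thp_failed=%d\n", nr_failed, nr_thp_failed);
	return 0;
}

Built with any C99 compiler (for example, gcc -std=c99 -o toy toy.c), it prints the failure totals left after the configured number of passes. The point is only that no separate large_retry/nr_large_failed counters are needed once failures of every folio size feed the same pair, which is exactly the bookkeeping the patch removes.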