A regression was observed when the system is under high memory
pressure with swap on: migrc can keep a number of folios in its
pending queue, which may make the pressure worse. Temporarily
prevent migrc from working under that condition.

Signed-off-by: Byungchul Park <byungchul@xxxxxx>
---
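Not part of the patch: for anyone reviewing the pause/resume pairing,
below is a minimal userspace sketch of the same gating pattern, with
C11 atomics standing in for the kernel's atomic_t. The helper names
mirror the patch, but the program itself is illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for migrc_pause_cnt: number of contexts requesting a pause. */
static atomic_int pause_cnt;

static void pause_enter(void) { atomic_fetch_add(&pause_cnt, 1); } /* ~migrc_pause() */
static void pause_exit(void)  { atomic_fetch_sub(&pause_cnt, 1); } /* ~migrc_resume() */
static bool paused(void) { return atomic_load(&pause_cnt) != 0; }  /* ~migrc_paused() */

int main(void)
{
	/* The allocation slowpath hits high memory pressure: pause once per call. */
	pause_enter();
	printf("migrc may queue folios: %s\n", paused() ? "no" : "yes");

	/* On the way out (got_pg), pair the earlier pause with exactly one resume. */
	pause_exit();
	printf("migrc may queue folios: %s\n", paused() ? "no" : "yes");
	return 0;
}

Using a counter rather than a flag lets any number of allocating tasks
pause migrc concurrently; migrc stays paused until the last of them
resumes.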
 mm/internal.h   | 20 ++++++++++++++++++++
 mm/migrate.c    | 18 +++++++++++++++++-
 mm/page_alloc.c | 13 +++++++++++++
 3 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/mm/internal.h b/mm/internal.h
index 5b93e5abf953..7643ccb7c156 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1285,6 +1285,8 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
 #endif /* CONFIG_SHRINKER_DEBUG */
 
 #if defined(CONFIG_MIGRATION) && defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH)
+extern atomic_t migrc_pause_cnt;
+
 /*
  * Reset the indicator indicating there are no writable mappings at the
  * beginning of every rmap traverse for unmap. Migrc can work only when
@@ -1313,6 +1315,21 @@ static inline bool can_migrc_test(void)
 	return current->can_migrc && current->tlb_ubc_ro.flush_required;
 }
 
+static inline void migrc_pause(void)
+{
+	atomic_inc(&migrc_pause_cnt);
+}
+
+static inline void migrc_resume(void)
+{
+	atomic_dec(&migrc_pause_cnt);
+}
+
+static inline bool migrc_paused(void)
+{
+	return !!atomic_read(&migrc_pause_cnt);
+}
+
 /*
  * Return the number of folios pending TLB flush that have yet to get
  * freed in the zone.
@@ -1330,6 +1347,9 @@ bool migrc_flush_free_folios(void);
 static inline void can_migrc_init(void) {}
 static inline void can_migrc_fail(void) {}
 static inline bool can_migrc_test(void) { return false; }
+static inline void migrc_pause(void) {}
+static inline void migrc_resume(void) {}
+static inline bool migrc_paused(void) { return false; }
 static inline int migrc_pending_nr_in_zone(struct zone *z) { return 0; }
 static inline bool migrc_flush_free_folios(void) { return false; }
 #endif
diff --git a/mm/migrate.c b/mm/migrate.c
index da4d43ac2622..b838f279f050 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -62,6 +62,12 @@ static struct tlbflush_unmap_batch migrc_ubc;
 static LIST_HEAD(migrc_folios);
 static DEFINE_SPINLOCK(migrc_lock);
 
+/*
+ * Increased on entry to high memory pressure handling, e.g. direct
+ * reclaim, and decreased on exit. See __alloc_pages_slowpath().
+ */
+atomic_t migrc_pause_cnt = ATOMIC_INIT(0);
+
 static void init_tlb_ubc(struct tlbflush_unmap_batch *ubc)
 {
 	arch_tlbbatch_clear(&ubc->arch);
@@ -1922,7 +1928,8 @@ static int migrate_pages_batch(struct list_head *from,
 	 */
 	init_tlb_ubc(&pending_ubc);
 	do_migrc = IS_ENABLED(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) &&
-		   (reason == MR_DEMOTION || reason == MR_NUMA_MISPLACED);
+		   (reason == MR_DEMOTION || reason == MR_NUMA_MISPLACED) &&
+		   !migrc_paused();
 
 	for (pass = 0; pass < nr_pass && retry; pass++) {
 		retry = 0;
@@ -1961,6 +1968,15 @@ static int migrate_pages_batch(struct list_head *from,
 				continue;
 			}
 
+			/*
+			 * If the system is under high memory pressure,
+			 * give up the migrc mechanism this turn.
+			 */
+			if (unlikely(do_migrc && migrc_paused())) {
+				fold_ubc(tlb_ubc, &pending_ubc);
+				do_migrc = false;
+			}
+
 			can_migrc_init();
 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
 					private, folio, &dst, mode, reason,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6ef0c22b1109..366777afce7f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4072,6 +4072,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
+	bool migrc_paused = false;
 
 restart:
 	compaction_retries = 0;
@@ -4203,6 +4204,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (page)
 		goto got_pg;
 
+	/*
+	 * The system is under very high memory pressure. Temporarily
+	 * pause migrc from expanding its pending queue.
+	 */
+	if (!migrc_paused) {
+		migrc_pause();
+		migrc_paused = true;
+		migrc_flush_free_folios();
+	}
+
 	/* Caller is not willing to reclaim, we can't balance anything */
 	if (!can_direct_reclaim)
 		goto nopage;
@@ -4330,6 +4341,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	warn_alloc(gfp_mask, ac->nodemask,
 			"page allocation failure: order:%u", order);
 got_pg:
+	if (migrc_paused)
+		migrc_resume();
 	return page;
 }
 
--
2.17.1