Unmapped pagecache folios presently cannot be promoted off of lower
memory tiers, even when they are being accessed by read/write calls.
Enable promotion in folio_mark_accessed() when a given folio presently
resides on a lower tier of memory.

Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Gregory Price <gourry@xxxxxxxxxx>
---
 mm/swap.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/mm/swap.c b/mm/swap.c
index 9caf6b017cf0..08f76d748e29 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -37,6 +37,9 @@
 #include <linux/page_idle.h>
 #include <linux/local_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
+#include <linux/migrate.h>
 
 #include "internal.h"
 
@@ -463,9 +466,26 @@ static void folio_inc_refs(struct folio *folio)
  *
  * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
  * __folio_set_referenced() may be substituted for folio_mark_accessed().
+ *
+ * This call may also attempt to migrate the folio memory to the local node
+ * if it presently resides on a lower memory tier.
  */
 void folio_mark_accessed(struct folio *folio)
 {
+	int nid = folio_nid(folio);
+
+	/* If folio is on lower tier, try to promote to local node */
+	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+	    (nid == NUMA_NO_NODE || !node_is_toptier(nid))) {
+		int flags;
+
+		nid = numa_migrate_prep(folio, NULL, 0, nid, &flags);
+		if ((nid != NUMA_NO_NODE) &&
+		    !migrate_misplaced_folio_prepare(folio, NULL, nid)) {
+			migrate_misplaced_folio(folio, nid);
+		}
+	}
+
 	if (lru_gen_enabled()) {
 		folio_inc_refs(folio);
 		return;
-- 
2.43.0
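
Illustration only, not part of the patch: a minimal userspace sketch of the
access pattern the change targets. It assumes memory-tiering promotion is
enabled (sysctl kernel.numa_balancing set to include the
NUMA_BALANCING_MEMORY_TIERING bit) and that the file's pagecache currently
sits on a lower-tier node; the file path and pass count are hypothetical.
Each read() pass calls folio_mark_accessed() on the unmapped pagecache
folios, making them candidates for promotion with this patch applied.

/*
 * Illustrative sketch: repeatedly read an unmapped pagecache file so its
 * folios are marked accessed and, if resident on a lower tier, become
 * eligible for promotion. Path and iteration count are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/mnt/data/hot-file", O_RDONLY);	/* hypothetical path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Re-read the file a few times; only the pagecache accesses matter. */
	for (int pass = 0; pass < 8; pass++) {
		ssize_t n;

		lseek(fd, 0, SEEK_SET);
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			;	/* data discarded */
		if (n < 0) {
			perror("read");
			break;
		}
	}

	close(fd);
	return 0;
}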