It might be desirable for an architecture to modify the gfp flags used to
allocate the destination page for migration based on the page that is being
replaced. For example, if an architecture has metadata associated with a
page (like arm64, when the memory tagging extension is implemented), it can
request that the destination page similarly has storage for tags already
allocated.

No functional change.

Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
---
 include/linux/migrate.h | 4 ++++
 mm/mempolicy.c          | 2 ++
 mm/migrate.c            | 3 +++
 3 files changed, 9 insertions(+)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 2ce13e8a309b..0acef592043c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -60,6 +60,10 @@ struct movable_operations {
 /* Defined in mm/debug.c: */
 extern const char *migrate_reason_names[MR_TYPES];
 
+#ifndef arch_migration_target_gfp
+#define arch_migration_target_gfp(src, gfp) 0
+#endif
+
 #ifdef CONFIG_MIGRATION
 
 void putback_movable_pages(struct list_head *l);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 10a590ee1c89..50bc43ab50d6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1182,6 +1182,7 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src,
 
 		h = folio_hstate(src);
 		gfp = htlb_alloc_mask(h);
+		gfp |= arch_migration_target_gfp(src, gfp);
 		nodemask = policy_nodemask(gfp, pol, ilx, &nid);
 		return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp);
 	}
@@ -1190,6 +1191,7 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src,
 		gfp = GFP_TRANSHUGE;
 	else
 		gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
+	gfp |= arch_migration_target_gfp(src, gfp);
 
 	page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
 	return page_rmappable_folio(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 35a88334bb3c..dd25ab69e3de 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2016,6 +2016,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
 		struct hstate *h = folio_hstate(src);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
+		gfp_mask |= arch_migration_target_gfp(src, gfp_mask);
 		return alloc_hugetlb_folio_nodemask(h, nid,
 						mtc->nmask, gfp_mask);
 	}
@@ -2032,6 +2033,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
 	zidx = zone_idx(folio_zone(src));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
+	gfp_mask |= arch_migration_target_gfp(src, gfp_mask);
 
 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 }
@@ -2500,6 +2502,7 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 					  __GFP_NOWARN;
 		gfp &= ~__GFP_RECLAIM;
 	}
+	gfp |= arch_migration_target_gfp(src, gfp);
 
 	return __folio_alloc_node(gfp, order, nid);
 }
-- 
2.42.1