To migrate unmapped pagecache folios, migrate_misplaced_folio and
migrate_misplaced_folio_prepare must handle folios without VMAs.

migrate_misplaced_folio_prepare checks the VMA for exec bits, so allow a
NULL VMA when the folio does not have a mapping.

migrate_misplaced_folio does not require a VMA, so remove it from the
argument list.

Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Gregory Price <gourry@xxxxxxxxxx>
---
 include/linux/migrate.h | 6 ++----
 mm/huge_memory.c        | 2 +-
 mm/memory.c             | 2 +-
 mm/migrate.c            | 7 +++----
 4 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..553bdb4a65db 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -141,16 +141,14 @@ const struct movable_operations *page_movable_ops(struct page *page)
 #ifdef CONFIG_NUMA_BALANCING
 int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node);
-int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
-		int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
 #else
 static inline int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
-static inline int migrate_misplaced_folio(struct folio *folio,
-		struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f4be468e06a4..f79c42e36d37 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1725,7 +1725,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 	writable = false;
 
-	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+	if (!migrate_misplaced_folio(folio, target_nid)) {
 		flags |= TNF_MIGRATED;
 		nid = target_nid;
 	} else {
diff --git a/mm/memory.c b/mm/memory.c
index 34f8402d2046..1d97bdfd0ed6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5355,7 +5355,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	ignore_writable = true;
 
 	/* Migrate to the requested node */
-	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+	if (!migrate_misplaced_folio(folio, target_nid)) {
 		nid = target_nid;
 		flags |= TNF_MIGRATED;
 	} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index e7296c0fb5d5..c648dc08758b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2546,7 +2546,7 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 
 /*
  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
- * permitted. Must be called with the PTL still held.
+ * permitted. If the VMA is not NULL, must be called with the PTL still held.
  */
 int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node)
@@ -2563,7 +2563,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
 	 * See folio_likely_mapped_shared() on possible imprecision
 	 * when we cannot easily detect if a folio is shared.
 	 */
-	if ((vma->vm_flags & VM_EXEC) &&
+	if (vma && (vma->vm_flags & VM_EXEC) &&
 	    folio_likely_mapped_shared(folio))
 		return -EACCES;
 
@@ -2614,8 +2614,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
  * elevated reference count on the folio. This function will un-isolate the
  * folio, dereferencing the folio before returning.
  */
-int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
-		int node)
+int migrate_misplaced_folio(struct folio *folio, int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
 	int nr_remaining;
-- 
2.43.0
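
For illustration only, not part of this series: a minimal sketch of how an
unmapped-pagecache promotion path might call the reworked API once a target
node has been chosen. The promote_unmapped_folio() helper, its locking
context, and its error handling are hypothetical assumptions, not code from
this patch.

/*
 * Hypothetical caller sketch: promote an unmapped pagecache folio to
 * target_nid using the NULL-VMA form of the prepare helper.
 */
static int promote_unmapped_folio(struct folio *folio, int target_nid)
{
	int err;

	/* No VMA exists for an unmapped folio; NULL skips the VM_EXEC check. */
	err = migrate_misplaced_folio_prepare(folio, NULL, target_nid);
	if (err)
		return err;

	/* The VMA argument has been dropped from migrate_misplaced_folio(). */
	return migrate_misplaced_folio(folio, target_nid);
}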