On 2024/8/26 22:50, David Hildenbrand wrote:
diff --git a/mm/migrate.c b/mm/migrate.c
index dbfa910ec24b..53f8429a8ebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
}
}
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	bool isolated = false;

No need to initialize this to "false".

+
+	if (folio_test_hugetlb(folio)) {
+		isolated = isolate_hugetlb(folio, list);
+	} else {
+		bool lru = !__folio_test_movable(folio);
+
+		if (lru)
+			isolated = folio_isolate_lru(folio);
+		else
+			isolated = isolate_movable_page(&folio->page,
+							ISOLATE_UNEVICTABLE);
+
+		if (isolated) {
+			list_add(&folio->lru, list);
+			if (lru)
+				node_stat_add_folio(folio, NR_ISOLATED_ANON +
+						    folio_is_file_lru(folio));
+		}
+	}
+
+	return isolated;
Revisiting this patch, we should likely do

	bool isolated, lru;

	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	lru = !__folio_test_movable(folio);
	if (lru)
		...

	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;

to avoid one indentation level and clean up the code flow a bit.
Sure, will rewrite it according to the above pattern; a sketch of the result follows below the quoted hunk.
+}
+
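For clarity, a rough, untested sketch of how the helper could look after folding in the review comments (drop the "false" initialization, return early for hugetlb, bail out early when isolation fails); the calls are the same ones already used in the patch above:

	/* Must be called with an elevated refcount on the non-hugetlb folio */
	bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
	{
		bool isolated, lru;

		/* hugetlb folios have their own isolation helper */
		if (folio_test_hugetlb(folio))
			return isolate_hugetlb(folio, list);

		lru = !__folio_test_movable(folio);
		if (lru)
			isolated = folio_isolate_lru(folio);
		else
			isolated = isolate_movable_page(&folio->page,
							ISOLATE_UNEVICTABLE);

		if (!isolated)
			return false;

		list_add(&folio->lru, list);
		/* only LRU folios are accounted in the isolated counters */
		if (lru)
			node_stat_add_folio(folio, NR_ISOLATED_ANON +
					    folio_is_file_lru(folio));
		return true;
	}

The early returns keep the success path at a single indentation level, which matches the flow David suggested.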
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
					  struct folio *folio,
					  unsigned long idx)