Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()


diff --git a/mm/migrate.c b/mm/migrate.c
index dbfa910ec24b..53f8429a8ebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
  	}
  }
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	bool isolated = false;

No need to initialize this to "false".

+
+	if (folio_test_hugetlb(folio)) {
+		isolated = isolate_hugetlb(folio, list);
+	} else {
+		bool lru = !__folio_test_movable(folio);
+
+		if (lru)
+			isolated = folio_isolate_lru(folio);
+		else
+			isolated = isolate_movable_page(&folio->page,
+							ISOLATE_UNEVICTABLE);
+
+		if (isolated) {
+			list_add(&folio->lru, list);
+			if (lru)
+				node_stat_add_folio(folio, NR_ISOLATED_ANON +
+						    folio_is_file_lru(folio));
+		}
+	}
+
+	return isolated;

Revisiting this patch, we should likely do

bool isolated, lru;

if (folio_test_hugetlb(folio))
	return isolate_hugetlb(folio, list);

lru = !__folio_test_movable(folio);
if (lru)
...

if (!isolated)
	return false;

list_add(&folio->lru, list);
if (lru)
	node_stat_add_folio(folio, NR_ISOLATED_ANON +
			    folio_is_file_lru(folio));
return true;


to avoid one indentation level and clean up the code flow a bit.
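
For completeness, a minimal sketch of that shape, with the elided branch filled
in from the hunk quoted above (untested, just to illustrate the suggested flow;
all identifiers are taken from the patch itself):

/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	/* isolate_hugetlb() already adds the folio to the list on success */
	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);

	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	/* only LRU folios are accounted in NR_ISOLATED_* */
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}
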

+}
+
  static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
  					  struct folio *folio,
  					  unsigned long idx)

--
Cheers,

David / dhildenb
