Re: [PATCH next] mm/vmscan: __isolate_lru_page_prepare clean up

On 11/22/20 3:00 PM, Alex Shi wrote:
Thanks a lot for all the comments, I picked them all up and here is the v3:

 From 167131dd106a96fd08af725df850e0da6ec899af Mon Sep 17 00:00:00 2001
From: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Date: Fri, 20 Nov 2020 14:49:16 +0800
Subject: [PATCH v3 next] mm/vmscan: __isolate_lru_page_prepare clean up

The function returns only two distinct results, so using a 'switch' on
its return value is unnecessary. Simplify it to a bool function, as
Vlastimil suggested.

Also remove the 'goto' by reusing list_move(), and take Matthew
Wilcox's suggestion to update the comments in the function.

I wouldn't mind if the goto stayed, but there isn't that much repetition without it (list_move() + continue, three times), so...
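
For reference, keeping the goto would give the loop body roughly this
shape (a stripped-down sketch only, not the actual isolate_lru_pages()
code: the scan budget, nr_pages accounting, skip-page handling and
lru_lock are all left out):

		if (!__isolate_lru_page_prepare(page, mode))
			goto busy;
		/* don't clear PageLRU until the refcount is pinned */
		if (unlikely(!get_page_unless_zero(page)))
			goto busy;
		if (!TestClearPageLRU(page)) {
			/* another isolation path got here first */
			put_page(page);
			goto busy;
		}
		nr_taken += nr_pages;
		nr_zone_taken[page_zonenum(page)] += nr_pages;
		list_move(&page->lru, dst);
		continue;
busy:
		list_move(&page->lru, src);

versus the three open-coded list_move() + continue branches in the
patch below.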

Acked-by: Vlastimil Babka <vbabka@xxxxxxx>

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
  include/linux/swap.h |  2 +-
  mm/compaction.c      |  2 +-
  mm/vmscan.c          | 68 ++++++++++++++++++++------------------------
  3 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 596bc2f4d9b0..5bba15ac5a2e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
  extern unsigned long zone_reclaimable_pages(struct zone *zone);
  extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
  						  unsigned long nr_pages,
  						  gfp_t gfp_mask,
diff --git a/mm/compaction.c b/mm/compaction.c
index b68931854253..8d71ffebe6cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -988,7 +988,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  		if (unlikely(!get_page_unless_zero(page)))
  			goto isolate_fail;
-		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+		if (!__isolate_lru_page_prepare(page, isolate_mode))
  			goto isolate_fail_put;
 		/* Try isolate the page */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c6f94e55c3fe..4d2703c43310 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1536,19 +1536,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
   * page:	page to consider
   * mode:	one of the LRU isolation modes defined above
   *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
   */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
  {
-	int ret = -EBUSY;
-
  	/* Only take pages on the LRU. */
  	if (!PageLRU(page))
-		return ret;
+		return false;
 	/* Compaction should not handle unevictable pages but CMA can do so */
  	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-		return ret;
+		return false;
 	/*
  	 * To minimise LRU disruption, the caller can indicate that it only
@@ -1561,7 +1559,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
  	if (mode & ISOLATE_ASYNC_MIGRATE) {
  		/* All the caller can do on PageWriteback is block */
  		if (PageWriteback(page))
-			return ret;
+			return false;
 		if (PageDirty(page)) {
  			struct address_space *mapping;
@@ -1577,20 +1575,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
  			 * from the page cache.
  			 */
  			if (!trylock_page(page))
-				return ret;
+				return false;
 			mapping = page_mapping(page);
  			migrate_dirty = !mapping || mapping->a_ops->migratepage;
  			unlock_page(page);
  			if (!migrate_dirty)
-				return ret;
+				return false;
  		}
  	}
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-		return ret;
+		return false;
-	return 0;
+	return true;
  }
 /*
@@ -1674,35 +1672,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  		 * only when the page is being freed somewhere else.
  		 */
  		scan += nr_pages;
-		switch (__isolate_lru_page_prepare(page, mode)) {
-		case 0:
-			/*
-			 * Be careful not to clear PageLRU until after we're
-			 * sure the page is not being freed elsewhere -- the
-			 * page release code relies on it.
-			 */
-			if (unlikely(!get_page_unless_zero(page)))
-				goto busy;
-
-			if (!TestClearPageLRU(page)) {
-				/*
-				 * This page may in other isolation path,
-				 * but we still hold lru_lock.
-				 */
-				put_page(page);
-				goto busy;
-			}
-
-			nr_taken += nr_pages;
-			nr_zone_taken[page_zonenum(page)] += nr_pages;
-			list_move(&page->lru, dst);
-			break;
+		if (!__isolate_lru_page_prepare(page, mode)) {
+			/* It is being freed elsewhere */
+			list_move(&page->lru, src);
+			continue;
+		}
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page))) {
+			list_move(&page->lru, src);
+			continue;
+		}
-		default:
-busy:
-			/* else it is being freed elsewhere */
+		if (!TestClearPageLRU(page)) {
+			/* Another thread is already isolating this page */
+			put_page(page);
  			list_move(&page->lru, src);
+			continue;
  		}
+
+		nr_taken += nr_pages;
+		nr_zone_taken[page_zonenum(page)] += nr_pages;
+		list_move(&page->lru, dst);
  	}
 	/*





