From: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>

[ Upstream commit c2135f7c570bc274035834848d9bf46ea89ba763 ]

The function just returns 2 results, so using a 'switch' to deal with its
result is unnecessary.  Also simplify it to a bool func as Vlastimil
suggested.

Also remove 'goto' by reusing list_move(), and take Matthew Wilcox's
suggestion to update comments in function.

Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@xxxxxxxxxxxxxxxxx
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 include/linux/swap.h |  2 +-
 mm/compaction.c      |  2 +-
 mm/vmscan.c          | 68 ++++++++++++++++++++------------------------
 3 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3577d3a6ec37..394d5de5d4b4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -358,7 +358,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
 						  gfp_t gfp_mask,
diff --git a/mm/compaction.c b/mm/compaction.c
index ba3e907f03b7..ea46aadc7c21 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -980,7 +980,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (unlikely(!get_page_unless_zero(page)))
 			goto isolate_fail;
 
-		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+		if (!__isolate_lru_page_prepare(page, isolate_mode))
 			goto isolate_fail_put;
 
 		/* Try isolate the page */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5ada402c8d95..00a47845a15b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1543,19 +1543,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  * page:	page to consider
  * mode:	one of the LRU isolation modes defined above
  *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
  */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
-	int ret = -EBUSY;
-
 	/* Only take pages on the LRU. */
 	if (!PageLRU(page))
-		return ret;
+		return false;
 
 	/* Compaction should not handle unevictable pages but CMA can do so */
 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-		return ret;
+		return false;
 
 	/*
 	 * To minimise LRU disruption, the caller can indicate that it only
@@ -1568,7 +1566,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 	if (mode & ISOLATE_ASYNC_MIGRATE) {
 		/* All the caller can do on PageWriteback is block */
 		if (PageWriteback(page))
-			return ret;
+			return false;
 
 		if (PageDirty(page)) {
 			struct address_space *mapping;
@@ -1584,20 +1582,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 			 * from the page cache.
 			 */
 			if (!trylock_page(page))
-				return ret;
+				return false;
 
 			mapping = page_mapping(page);
 			migrate_dirty = !mapping || mapping->a_ops->migratepage;
 			unlock_page(page);
 			if (!migrate_dirty)
-				return ret;
+				return false;
 		}
 	}
 
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-		return ret;
+		return false;
 
-	return 0;
+	return true;
 }
 
 /*
@@ -1679,35 +1677,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		 * only when the page is being freed somewhere else.
 		 */
 		scan += nr_pages;
-		switch (__isolate_lru_page_prepare(page, mode)) {
-		case 0:
-			/*
-			 * Be careful not to clear PageLRU until after we're
-			 * sure the page is not being freed elsewhere -- the
-			 * page release code relies on it.
-			 */
-			if (unlikely(!get_page_unless_zero(page)))
-				goto busy;
-
-			if (!TestClearPageLRU(page)) {
-				/*
-				 * This page may in other isolation path,
-				 * but we still hold lru_lock.
-				 */
-				put_page(page);
-				goto busy;
-			}
-
-			nr_taken += nr_pages;
-			nr_zone_taken[page_zonenum(page)] += nr_pages;
-			list_move(&page->lru, dst);
-			break;
+		if (!__isolate_lru_page_prepare(page, mode)) {
+			/* It is being freed elsewhere */
+			list_move(&page->lru, src);
+			continue;
+		}
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page))) {
+			list_move(&page->lru, src);
+			continue;
+		}
 
-		default:
-busy:
-			/* else it is being freed elsewhere */
+		if (!TestClearPageLRU(page)) {
+			/* Another thread is already isolating this page */
+			put_page(page);
 			list_move(&page->lru, src);
+			continue;
 		}
+
+		nr_taken += nr_pages;
+		nr_zone_taken[page_zonenum(page)] += nr_pages;
+		list_move(&page->lru, dst);
 	}
 
 	/*
-- 
2.35.1
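
[Editor's note: for readers skimming the diff, here is a minimal userspace
sketch (not kernel code) of the control-flow shape this patch adopts: a
helper that can only succeed or fail returns bool and is tested with an
early 'continue', rather than returning 0/-EBUSY into a switch with a
'goto busy' fallthrough.  The names item, prepare(), on_lru and the loop
below are hypothetical stand-ins for struct page,
__isolate_lru_page_prepare(), PageLRU() and the isolate_lru_pages() scan
loop.]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct item {
	int id;
	bool on_lru;		/* stand-in for PageLRU() */
};

/*
 * Stand-in for __isolate_lru_page_prepare() after this patch: the
 * helper either succeeds or fails, so it returns bool, not an errno.
 */
static bool prepare(const struct item *it)
{
	return it->on_lru;
}

int main(void)
{
	struct item items[] = {
		{ .id = 1, .on_lru = true  },
		{ .id = 2, .on_lru = false },
		{ .id = 3, .on_lru = true  },
	};
	int taken = 0;

	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		/*
		 * The shape the patch introduces: test the bool and
		 * 'continue' early; no switch, no goto label.
		 */
		if (!prepare(&items[i]))
			continue;	/* item stays on its list */

		taken++;
		printf("isolated item %d\n", items[i].id);
	}
	printf("taken=%d\n", taken);
	return 0;
}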