From: Alex Shi <alexs@xxxxxxxxxx>

The function del_page_from_lru_list() could be fully replaced by
lruvec_del_folio(), no reason to keep a duplicate one.

Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Alex Shi <alexs@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 include/linux/mm_inline.h |  6 ------
 mm/compaction.c           |  2 +-
 mm/mlock.c                |  2 +-
 mm/swap.c                 | 10 +++++-----
 mm/vmscan.c               |  4 ++--
 5 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 4df5b39cc97b..a66c08079675 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -110,12 +110,6 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 			-folio_nr_pages(folio));
 }
 
-static __always_inline void del_page_from_lru_list(struct page *page,
-				struct lruvec *lruvec)
-{
-	lruvec_del_folio(lruvec, page_folio(page));
-}
-
 #ifdef CONFIG_ANON_VMA_NAME
 /*
  * mmap_lock should be read-locked when calling vma_anon_name() and while using
diff --git a/mm/compaction.c b/mm/compaction.c
index 12f2af6ac484..385e0bb7aad5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1064,7 +1064,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			low_pfn += compound_nr(page) - 1;
 
 		/* Successfully isolated */
-		del_page_from_lru_list(page, lruvec);
+		lruvec_del_folio(lruvec, page_folio(page));
 		mod_node_page_state(page_pgdat(page),
 				NR_ISOLATED_ANON + folio_is_file_lru(page_folio(page)),
 				thp_nr_pages(page));
diff --git a/mm/mlock.c b/mm/mlock.c
index 8f584eddd305..6b64758b5d8c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -280,7 +280,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		 */
 		if (TestClearPageLRU(page)) {
 			lruvec = folio_lruvec_relock_irq(folio, lruvec);
-			del_page_from_lru_list(page, lruvec);
+			lruvec_del_folio(lruvec, page_folio(page));
 			continue;
 		} else
 			__munlock_isolation_failed(page);
diff --git a/mm/swap.c b/mm/swap.c
index 23c0afb76be6..359821740e0f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -85,7 +85,7 @@ static void __page_cache_release(struct page *page)
 		unsigned long flags;
 
 		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-		del_page_from_lru_list(page, lruvec);
+		lruvec_del_folio(lruvec, page_folio(page));
 		__folio_clear_lru_flags(page_folio(page));
 		unlock_page_lruvec_irqrestore(lruvec, flags);
 	}
@@ -533,7 +533,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
 	if (page_mapped(page))
 		return;
 
-	del_page_from_lru_list(page, lruvec);
+	lruvec_del_folio(lruvec, page_folio(page));
 	ClearPageActive(page);
 	ClearPageReferenced(page);
 
@@ -566,7 +566,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
 	if (PageActive(page) && !PageUnevictable(page)) {
 		int nr_pages = thp_nr_pages(page);
 
-		del_page_from_lru_list(page, lruvec);
+		lruvec_del_folio(lruvec, page_folio(page));
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		lruvec_add_folio(lruvec, page_folio(page));
@@ -583,7 +583,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		int nr_pages = thp_nr_pages(page);
 
-		del_page_from_lru_list(page, lruvec);
+		lruvec_del_folio(lruvec, page_folio(page));
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		/*
@@ -965,7 +965,7 @@ void release_pages(struct page **pages, int nr)
 			if (prev_lruvec != lruvec)
 				lock_batch = 0;
 
-			del_page_from_lru_list(page, lruvec);
+			lruvec_del_folio(lruvec, page_folio(page));
 			__folio_clear_lru_flags(page_folio(page));
 		}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f09473c9ff35..8ab97eac284a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2247,7 +2247,7 @@ int isolate_lru_page(struct page *page)
 
 		get_page(page);
 		lruvec = folio_lruvec_lock_irq(folio);
-		del_page_from_lru_list(page, lruvec);
+		lruvec_del_folio(lruvec, page_folio(page));
 		unlock_page_lruvec_irq(lruvec);
 		ret = 0;
 	}
@@ -4873,7 +4873,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
 			lruvec = folio_lruvec_relock_irq(folio, lruvec);
 			if (page_evictable(page) && PageUnevictable(page)) {
-				del_page_from_lru_list(page, lruvec);
+				lruvec_del_folio(lruvec, page_folio(page));
 				ClearPageUnevictable(page);
 				lruvec_add_folio(lruvec, page_folio(page));
 				pgrescued += nr_pages;
-- 
2.25.1