Move scattered VM_BUG_ONs to two essential places that cover all lru
list additions and deletions.

The two places are page_lru(), which every lru list addition and
deletion goes through to compute the list index, and
__clear_page_lru_flags(), which every page goes through when it leaves
the lru lists for good. Asserting there makes the checks at the
individual call sites redundant.

Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
---
 include/linux/mm_inline.h | 4 ++++
 mm/swap.c                 | 2 --
 mm/vmscan.c               | 1 -
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 07d9a0286635..7183c7a03f09 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -51,6 +51,8 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
  */
 static __always_inline void __clear_page_lru_flags(struct page *page)
 {
+	VM_BUG_ON_PAGE(!PageLRU(page), page);
+
 	__ClearPageLRU(page);
 
 	/* this shouldn't happen, so leave the flags to bad_page() */
@@ -72,6 +74,8 @@ static __always_inline enum lru_list page_lru(struct page *page)
 {
 	enum lru_list lru;
 
+	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+
 	if (PageUnevictable(page))
 		return LRU_UNEVICTABLE;
 
diff --git a/mm/swap.c b/mm/swap.c
index b252f3593c57..4daa46907dd5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -85,7 +85,6 @@ static void __page_cache_release(struct page *page)
 
 		spin_lock_irqsave(&pgdat->lru_lock, flags);
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		del_page_from_lru_list(page, lruvec);
 		__clear_page_lru_flags(page);
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
@@ -885,7 +884,6 @@ void release_pages(struct page **pages, int nr)
 			}
 
 			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
-			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			del_page_from_lru_list(page, lruvec);
 			__clear_page_lru_flags(page);
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d93033407200..4688e495c242 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4276,7 +4276,6 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 			continue;
 
 		if (page_evictable(page)) {
-			VM_BUG_ON_PAGE(PageActive(page), page);
 			del_page_from_lru_list(page, lruvec);
 			ClearPageUnevictable(page);
 			add_page_to_lru_list(page, lruvec);
-- 
2.28.0.681.g6f77f65b4e-goog