We need to make sure that the page is deleted from or added to the
correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid users.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 include/linux/mm_inline.h | 15 ++++++++++++---
 mm/vmscan.c               |  1 -
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e2ec68b0515c..60eb827a41fe 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -103,7 +103,10 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
 static __always_inline void add_page_to_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
-	lruvec_add_folio(lruvec, page_folio(page));
+	struct folio *folio = page_folio(page);
+
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+	lruvec_add_folio(lruvec, folio);
 }
 
 static __always_inline
@@ -119,7 +122,10 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
 				struct lruvec *lruvec)
 {
-	lruvec_add_folio_tail(lruvec, page_folio(page));
+	struct folio *folio = page_folio(page);
+
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+	lruvec_add_folio_tail(lruvec, folio);
 }
 
 static __always_inline
@@ -133,6 +139,9 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
-	lruvec_del_folio(lruvec, page_folio(page));
+	struct folio *folio = page_folio(page);
+
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+	lruvec_del_folio(lruvec, folio);
 }
 #endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ce42858ad5d..902d36ec91a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2204,7 +2204,6 @@ static unsigned int move_pages_to_lru(struct list_head *list)
 			continue;
 		}
 
-		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
-- 
2.11.0
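
[Editor's note, not part of the patch: the predicate used by the new
assertions is folio_matches_lruvec(). As a reference for reviewers, it
is, at the time of this series, roughly the following helper from
include/linux/memcontrol.h; this is an illustrative sketch and the exact
definition or location may differ:

static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	/* A folio belongs to an lruvec iff both the node and the memcg match. */
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

So, on CONFIG_DEBUG_VM kernels, the added VM_BUG_ON_FOLIO() fires
whenever a caller adds a folio to, or deletes it from, an lruvec whose
node or memcg does not match the folio.]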