Since we introduced relock_page_lruvec(), we can use it in more places
to reduce repeated spin_lock/unlock cycles.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 mm/swap.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 50c856246f84..74e03589adde 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -197,13 +197,15 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 		if (isolation && !TestClearPageLRU(page))
 			continue;
 
-		lruvec = lock_page_lruvec_irqsave(page, &flags);
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec, arg);
-		unlock_page_lruvec_irqrestore(lruvec, flags);
 
 		if (isolation)
 			SetPageLRU(page);
 	}
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+
 	release_pages(pvec->pages, pvec->nr);
 	pagevec_reinit(pvec);
 }
@@ -821,14 +823,11 @@ void release_pages(struct page **pages, int nr)
 		}
 
 		if (TestClearPageLRU(page)) {
-			struct lruvec *new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+			struct lruvec *pre_lruvec = lruvec;
 
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					unlock_page_lruvec_irqrestore(lruvec, flags);
+			lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+			if (pre_lruvec != lruvec)
 				lock_batch = 0;
-				lruvec = lock_page_lruvec_irqsave(page, &flags);
-			}
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
-- 
1.8.3.1
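
For reviewers unfamiliar with the helper: below is a rough sketch of how
relock_page_lruvec_irqsave() behaves (simplified, not the exact
implementation from the earlier patch in this series). It only drops and
re-takes the lru lock when the page belongs to a different lruvec than
the one currently held, so consecutive pages on the same lruvec are
handled under a single lock/unlock pair.

static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	/* Fast path: page is on the lruvec whose lock we already hold. */
	if (locked_lruvec &&
	    mem_cgroup_page_lruvec(page, page_pgdat(page)) == locked_lruvec)
		return locked_lruvec;

	/* Otherwise drop the old lock (if any) and take the new one. */
	if (locked_lruvec)
		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);

	return lock_page_lruvec_irqsave(page, flags);
}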