Since we introduced relock_page_lruvec, we can use it in more places
to reduce repeated spin_lock/spin_unlock cycles: the lru_lock is now
only dropped and re-acquired when the next page belongs to a different
lruvec.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: swkhack <swkhack@xxxxxxxxx>
Cc: "Potyra, Stefan" <Stefan.Potyra@xxxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Peng Fan <peng.fan@xxxxxxx>
Cc: Nikolay Borisov <nborisov@xxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Cc: Yafang Shao <laoar.shao@xxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 mm/swap.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 05fee145e382..a023e6095bd9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -197,11 +197,12 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		lruvec = lock_page_lruvec_irqsave(page, page_pgdat(page), &flags);
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec, arg);
-		spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 	}
+	if (lruvec)
+		spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 
 	release_pages(pvec->pages, pvec->nr);
 	pagevec_reinit(pvec);
@@ -820,15 +821,12 @@ void release_pages(struct page **pages, int nr)
 		}
 
 		if (PageLRU(page)) {
-			struct lruvec *new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+			struct lruvec *pre_lruvec = lruvec;
 
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					spin_unlock_irqrestore(&lruvec->lru_lock, flags);
+			lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+			if (pre_lruvec != lruvec)
 				lock_batch = 0;
-				lruvec = lock_page_lruvec_irqsave(page, page_pgdat(page), &flags);
-			}
 
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
-- 
1.8.3.1
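
[Editor's note] For readers without the earlier patches of this series: below
is a minimal sketch of the semantics the patch assumes relock_page_lruvec_irqsave()
to have. The helpers mem_cgroup_page_lruvec() and lock_page_lruvec_irqsave() are
taken from the removed lines above; the real definition lives in an earlier patch
and may differ in detail (e.g. how it keeps the page->lruvec binding stable):

	/*
	 * Sketch: return a locked lruvec for @page. If @locked_lruvec is
	 * already the right one, keep holding it; otherwise drop it and
	 * take the lock covering @page instead. Callers start with NULL
	 * and unlock the last returned lruvec themselves when done.
	 */
	static inline struct lruvec *
	relock_page_lruvec_irqsave(struct page *page,
				   struct lruvec *locked_lruvec,
				   unsigned long *flags)
	{
		struct lruvec *lruvec;

		/* Which lruvec does this page belong to right now? */
		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));

		/* Same lruvec as last time: reuse the held lock. */
		if (lruvec == locked_lruvec)
			return lruvec;

		/* Different lruvec: hand the lock over. */
		if (locked_lruvec)
			spin_unlock_irqrestore(&locked_lruvec->lru_lock,
					       *flags);

		return lock_page_lruvec_irqsave(page, page_pgdat(page), flags);
	}

With these semantics, pagevec_lru_move_fn() can start with lruvec == NULL, let
the helper decide per page whether a lock hand-over is needed, and issue a
single unlock after the loop; in release_pages(), comparing pre_lruvec against
the returned lruvec resets lock_batch only when the lock actually changed.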