During pagevec locking, a new page's lruvec may be the same as the
previous one, so we can skip re-locking and change the lock only when
the lruvec is new. The function is named relock_page_lruvec, following
Hugh Dickins' patch.

The first version of this patch used rcu_read_lock to guard the lruvec
assignment and the comparison with locked_lruvec in relock_page_lruvec.
But Rong Chen <rong.a.chen@xxxxxxxxx> reported a regression with the
PROVE_LOCKING config: the rcu_read locking caused qspinlock waiters to
wait too long for the lock. Since we already hold a spinlock, the
rcu_read locking isn't necessary.

[lkp@xxxxxxxxx: Fix RCU-related regression reported by LKP robot]
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: swkhack <swkhack@xxxxxxxxx>
Cc: "Potyra, Stefan" <Stefan.Potyra@xxxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Peng Fan <peng.fan@xxxxxxx>
Cc: Nikolay Borisov <nborisov@xxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Cc: Yafang Shao <laoar.shao@xxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 include/linux/memcontrol.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
 mm/mlock.c                 | 16 +++++++++-------
 2 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9538253998a6..19ff453e2822 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1291,6 +1291,50 @@ static inline void dec_lruvec_page_state(struct page *page,
 	mod_lruvec_page_state(page, idx, -1);
 }
 
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+		struct lruvec *locked_lruvec)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
+
+	if (!locked_lruvec)
+		goto lock;
+
+	lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+	if (locked_lruvec == lruvec)
+		return lruvec;
+
+	spin_unlock_irq(&locked_lruvec->lru_lock);
+
+lock:
+	lruvec = lock_page_lruvec_irq(page, pgdat);
+	return lruvec;
+}
+
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+		struct lruvec *locked_lruvec, unsigned long *flags)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
+
+	if (!locked_lruvec)
+		goto lock;
+
+	lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+	if (locked_lruvec == lruvec)
+		return lruvec;
+
+	spin_unlock_irqrestore(&locked_lruvec->lru_lock, *flags);
+
+lock:
+	lruvec = lock_page_lruvec_irqsave(page, pgdat, flags);
+	return lruvec;
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
diff --git a/mm/mlock.c b/mm/mlock.c
index b509b80b8513..8b3a97b62c0a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -290,6 +290,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
 	int i;
 	int nr = pagevec_count(pvec);
+	int delta_munlocked = -nr;
 	struct pagevec pvec_putback;
 	struct lruvec *lruvec = NULL;
 	int pgrescued = 0;
@@ -300,20 +301,19 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
 
-		lruvec = lock_page_lruvec_irq(page, page_pgdat(page));
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 		if (TestClearPageMlocked(page)) {
 			/*
 			 * We already have pin from follow_page_mask()
 			 * so we can spare the get_page() here.
 			 */
-			if (__munlock_isolate_lru_page(page, lruvec, false)) {
-				__mod_zone_page_state(zone, NR_MLOCK, -1);
-				spin_unlock_irq(&lruvec->lru_lock);
+			if (__munlock_isolate_lru_page(page, lruvec, false))
 				continue;
-			} else
+			else
 				__munlock_isolation_failed(page);
-		}
+		} else
+			delta_munlocked++;
 
 		/*
 		 * We won't be munlocking this page in the next phase
@@ -323,8 +323,10 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		 */
 		pagevec_add(&pvec_putback, pvec->pages[i]);
 		pvec->pages[i] = NULL;
-		spin_unlock_irq(&lruvec->lru_lock);
 	}
+	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+	if (lruvec)
+		spin_unlock_irq(&lruvec->lru_lock);
 
 	/* Now we can release pins of pages that we are not munlocking */
 	pagevec_release(&pvec_putback);
-- 
1.8.3.1
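
For reference, below is a minimal userspace sketch of the relock
pattern this patch introduces. It is only an illustration: struct
bucket, struct item, relock_bucket() and process_batch() are
hypothetical stand-ins for the kernel's lruvec and lru_lock machinery,
and pthread spinlocks stand in for the kernel's irq-disabling spinlock
variants.

/*
 * Minimal userspace sketch (NOT kernel code) of the relock pattern:
 * while walking a batch of items, re-take the per-bucket lock only
 * when the current item's bucket differs from the one already held.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct bucket {
	pthread_spinlock_t lock;	/* stands in for lruvec->lru_lock */
	int nr_items;			/* some per-bucket state */
};

struct item {
	struct bucket *bucket;		/* analogous to the page's lruvec */
};

/* Analogous to relock_page_lruvec_irq(): reuse the held lock if possible */
static struct bucket *relock_bucket(struct item *it, struct bucket *locked)
{
	struct bucket *b = it->bucket;

	if (locked == b)
		return b;		/* same bucket: keep the lock we hold */
	if (locked)
		pthread_spin_unlock(&locked->lock);
	pthread_spin_lock(&b->lock);
	return b;
}

static void process_batch(struct item *items, int nr)
{
	struct bucket *locked = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		locked = relock_bucket(&items[i], locked);
		locked->nr_items++;	/* work done under the bucket lock */
	}
	if (locked)			/* single unlock after the batch */
		pthread_spin_unlock(&locked->lock);
}

int main(void)
{
	struct bucket b[2];
	/* consecutive items often share a bucket, so few relocks happen */
	struct item batch[4] = { {&b[0]}, {&b[0]}, {&b[0]}, {&b[1]} };
	int i;

	for (i = 0; i < 2; i++) {
		pthread_spin_init(&b[i].lock, PTHREAD_PROCESS_PRIVATE);
		b[i].nr_items = 0;
	}
	process_batch(batch, 4);
	printf("bucket0=%d bucket1=%d\n", b[0].nr_items, b[1].nr_items);
	return 0;
}

As in the __munlock_pagevec() hunks above, the lock is dropped only
when the batch crosses into a different bucket, and one unlock after
the loop replaces the per-item unlock.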