The patch titled
     Subject: mm/lru: introduce relock_page_lruvec()
has been added to the -mm tree.  Its filename is
     mm-lru-introduce-the-relock_page_lruvec-function.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-lru-introduce-the-relock_page_lruvec-function.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-lru-introduce-the-relock_page_lruvec-function.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Alexander Duyck <alexander.h.duyck@xxxxxxxxxxxxxxx>
Subject: mm/lru: introduce relock_page_lruvec()

Add relock_page_lruvec() to replace the same open-coded relocking sequence
that is currently repeated in several callers; no functional change.

When testing for relock we can avoid the need for RCU locking if we simply
compare the page pgdat and memcg pointers against those that the lruvec is
holding.  By doing this we can avoid the extra pointer walks and accesses
of the memory cgroup.  In addition we can avoid the checks entirely if
lruvec is currently NULL.
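As an illustration (not part of the patch itself), here is a minimal
sketch of the caller pattern the new helper supports; "pvec" and the
loop body below are stand-ins for the real pagevec users converted
further down:

	/*
	 * Sketch only: pass a NULL lruvec on the first iteration;
	 * relock_page_lruvec_irq() then keeps the current lru_lock
	 * held while consecutive pages belong to the same lruvec,
	 * and only drops/reacquires it when they do not.
	 */
	struct lruvec *lruvec = NULL;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		lruvec = relock_page_lruvec_irq(page, lruvec);
		/* ... operate on page under lruvec->lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irq(lruvec);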
Link: https://lkml.kernel.org/r/1604566549-62481-19-git-send-email-alex.shi@xxxxxxxxxxxxxxxxx
Signed-off-by: Alexander Duyck <alexander.h.duyck@xxxxxxxxxxxxxxx>
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Acked-by: Hugh Dickins <hughd@xxxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: "Chen, Rong A" <rong.a.chen@xxxxxxxxx>
Cc: Daniel Jordan <daniel.m.jordan@xxxxxxxxxx>
Cc: "Huang, Ying" <ying.huang@xxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Mika Penttilä <mika.penttila@xxxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Wei Yang <richard.weiyang@xxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memcontrol.h |   52 +++++++++++++++++++++++++++++++++++
 mm/mlock.c                 |   11 -------
 mm/swap.c                  |   31 ++++----------------
 mm/vmscan.c                |   12 +-------
 4 files changed, 61 insertions(+), 45 deletions(-)

--- a/include/linux/memcontrol.h~mm-lru-introduce-the-relock_page_lruvec-function
+++ a/include/linux/memcontrol.h
@@ -485,6 +485,22 @@ out:
 
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+					      struct lruvec *lruvec)
+{
+	pg_data_t *pgdat = page_pgdat(page);
+	const struct mem_cgroup *memcg;
+	struct mem_cgroup_per_node *mz;
+
+	if (mem_cgroup_disabled())
+		return lruvec == &pgdat->__lruvec;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	memcg = page->mem_cgroup ? : root_mem_cgroup;
+
+	return lruvec->pgdat == pgdat && mz->memcg == memcg;
+}
+
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -1023,6 +1039,14 @@ static inline struct lruvec *mem_cgroup_
 	return &pgdat->__lruvec;
 }
 
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+					      struct lruvec *lruvec)
+{
+	pg_data_t *pgdat = page_pgdat(page);
+
+	return lruvec == &pgdat->__lruvec;
+}
+
 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
 	return NULL;
@@ -1465,6 +1489,34 @@ static inline void unlock_page_lruvec_ir
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+		struct lruvec *locked_lruvec)
+{
+	if (locked_lruvec) {
+		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+			return locked_lruvec;
+
+		unlock_page_lruvec_irq(locked_lruvec);
+	}
+
+	return lock_page_lruvec_irq(page);
+}
+
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+		struct lruvec *locked_lruvec, unsigned long *flags)
+{
+	if (locked_lruvec) {
+		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+			return locked_lruvec;
+
+		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+	}
+
+	return lock_page_lruvec_irqsave(page, flags);
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
--- a/mm/mlock.c~mm-lru-introduce-the-relock_page_lruvec-function
+++ a/mm/mlock.c
@@ -277,16 +277,7 @@ static void __munlock_pagevec(struct pag
 		 * so we can spare the get_page() here.
 		 */
 		if (TestClearPageLRU(page)) {
-			struct lruvec *new_lruvec;
-
-			new_lruvec = mem_cgroup_page_lruvec(page,
-					page_pgdat(page));
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					unlock_page_lruvec_irq(lruvec);
-				lruvec = lock_page_lruvec_irq(page);
-			}
-
+			lruvec = relock_page_lruvec_irq(page, lruvec);
 			del_page_from_lru_list(page, lruvec, page_lru(page));
 			continue;
--- a/mm/swap.c~mm-lru-introduce-the-relock_page_lruvec-function
+++ a/mm/swap.c
@@ -210,19 +210,12 @@ static void pagevec_lru_move_fn(struct p
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
 
 		/* block memcg migration during page moving between lru */
 		if (!TestClearPageLRU(page))
 			continue;
 
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-			lruvec = lock_page_lruvec_irqsave(page, &flags);
-		}
-
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec);
 
 		SetPageLRU(page);
@@ -918,17 +911,12 @@ void release_pages(struct page **pages,
 		}
 
 		if (PageLRU(page)) {
-			struct lruvec *new_lruvec;
+			struct lruvec *prev_lruvec = lruvec;
 
-			new_lruvec = mem_cgroup_page_lruvec(page,
-							page_pgdat(page));
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					unlock_page_lruvec_irqrestore(lruvec,
-									flags);
+			lruvec = relock_page_lruvec_irqsave(page, lruvec,
+								&flags);
+			if (prev_lruvec != lruvec)
 				lock_batch = 0;
-				lruvec = lock_page_lruvec_irqsave(page, &flags);
-			}
 
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
@@ -1033,15 +1021,8 @@ void __pagevec_lru_add(struct pagevec *p
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
-
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-			lruvec = lock_page_lruvec_irqsave(page, &flags);
-		}
 
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		__pagevec_lru_add_fn(page, lruvec);
 	}
 	if (lruvec)
--- a/mm/vmscan.c~mm-lru-introduce-the-relock_page_lruvec-function
+++ a/mm/vmscan.c
@@ -1884,8 +1884,7 @@ static unsigned noinline_for_stack move_
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(mem_cgroup_page_lruvec(page, page_pgdat(page))
-							!= lruvec, page);
+		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
 		lru = page_lru(page);
 		nr_pages = thp_nr_pages(page);
 
@@ -4274,7 +4273,6 @@ void check_move_unevictable_pages(struct
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
 		int nr_pages;
-		struct lruvec *new_lruvec;
 
 		if (PageTransTail(page))
 			continue;
@@ -4286,13 +4284,7 @@ void check_move_unevictable_pages(struct
 		if (!TestClearPageLRU(page))
 			continue;
 
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irq(lruvec);
-			lruvec = lock_page_lruvec_irq(page);
-		}
-
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 		if (page_evictable(page) && PageUnevictable(page)) {
 			enum lru_list lru = page_lru_base_type(page);
_

Patches currently in -mm which might be from alexander.h.duyck@xxxxxxxxxxxxxxx are

mm-lru-introduce-the-relock_page_lruvec-function.patch