On Mon, 2 Mar 2020 19:00:12 +0800 Alex Shi wrote: > > As Konstantin Khlebnikov mentioned: > > Also I don't like these functions: > - called lock/unlock but actually also isolates > - used just once > - pgdat evaluated twice > > Cleanup and fold these functions into commit_charge. It also reduces > lock time while lrucare && !PageLRU. > > Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx> > Cc: Johannes Weiner <hannes@xxxxxxxxxxx> > Cc: Michal Hocko <mhocko@xxxxxxxxxx> > Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx> > Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx> > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> > Cc: cgroups@xxxxxxxxxxxxxxx > Cc: linux-mm@xxxxxxxxx > Cc: linux-kernel@xxxxxxxxxxxxxxx > --- > mm/memcontrol.c | 57 ++++++++++++++++++++------------------------------------- > 1 file changed, 20 insertions(+), 37 deletions(-) > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index d09776cd6e10..875e2aebcde7 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -2572,41 +2572,11 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) > css_put_many(&memcg->css, nr_pages); > } > > -static void lock_page_lru(struct page *page, int *isolated) > -{ > - pg_data_t *pgdat = page_pgdat(page); > - > - spin_lock_irq(&pgdat->lru_lock); > - if (PageLRU(page)) { > - struct lruvec *lruvec; > - > - lruvec = mem_cgroup_page_lruvec(page, pgdat); > - ClearPageLRU(page); > - del_page_from_lru_list(page, lruvec, page_lru(page)); > - *isolated = 1; > - } else > - *isolated = 0; > -} > - > -static void unlock_page_lru(struct page *page, int isolated) > -{ > - pg_data_t *pgdat = page_pgdat(page); > - > - if (isolated) { > - struct lruvec *lruvec; > - > - lruvec = mem_cgroup_page_lruvec(page, pgdat); > - VM_BUG_ON_PAGE(PageLRU(page), page); > - SetPageLRU(page); > - add_page_to_lru_list(page, lruvec, page_lru(page)); > - } > - spin_unlock_irq(&pgdat->lru_lock); > -} > - > static void commit_charge(struct page *page, struct mem_cgroup *memcg, 
> bool lrucare) > { > - int isolated; > + struct lruvec *lruvec = NULL; > + pg_data_t *pgdat; > > VM_BUG_ON_PAGE(page->mem_cgroup, page); > > @@ -2614,9 +2584,17 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, > * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page > * may already be on some other mem_cgroup's LRU. Take care of it. > */ > - if (lrucare) > - lock_page_lru(page, &isolated); > - > + if (lrucare) { > + pgdat = page_pgdat(page); > + spin_lock_irq(&pgdat->lru_lock); > + > + if (PageLRU(page)) { > + lruvec = mem_cgroup_page_lruvec(page, pgdat); > + ClearPageLRU(page); > + del_page_from_lru_list(page, lruvec, page_lru(page)); > + } else > + spin_unlock_irq(&pgdat->lru_lock); > + } > /* > * Nobody should be changing or seriously looking at > * page->mem_cgroup at this point: > @@ -2633,8 +2611,13 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, > */ > page->mem_cgroup = memcg; > Note that with this change, page->mem_cgroup is now updated without lru_lock held in the lrucare && !PageLRU case (the lock is dropped early in the else branch above). Even if that turns out to be safe with extra care, it is a change to the current locking semantics and deserves an explicit mention in the changelog. > - if (lrucare) > - unlock_page_lru(page, isolated); > + if (lrucare && lruvec) { > + lruvec = mem_cgroup_page_lruvec(page, pgdat); > + VM_BUG_ON_PAGE(PageLRU(page), page); > + SetPageLRU(page); > + add_page_to_lru_list(page, lruvec, page_lru(page)); > + spin_unlock_irq(&pgdat->lru_lock); > + } > } > > #ifdef CONFIG_MEMCG_KMEM > -- > 1.8.3.1 > >