在 2020/7/25 下午8:59, Alex Shi 写道: > We don't have to add a freeable page into lru and then remove from it. > This change saves a couple of actions and makes the moving more clear. > > The SetPageLRU needs to be kept here for list intergrity. > Otherwise: > #0 mave_pages_to_lru #1 release_pages > if (put_page_testzero()) > if !put_page_testzero > !PageLRU //skip lru_lock > list_add(&page->lru,) > list_add(&page->lru,) //corrupt The race comments should be corrected to this: /* * The SetPageLRU needs to be kept here for list integrity. * Otherwise: * #0 move_pages_to_lru #1 release_pages * if !put_page_testzero * if (put_page_testzero()) * !PageLRU //skip lru_lock * SetPageLRU() * list_add(&page->lru,) * list_add(&page->lru,) */ > > [akpm@xxxxxxxxxxxxxxxxxxxx: coding style fixes] > Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx> > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> > Cc: Johannes Weiner <hannes@xxxxxxxxxxx> > Cc: Tejun Heo <tj@xxxxxxxxxx> > Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx> > Cc: Hugh Dickins <hughd@xxxxxxxxxx> > Cc: linux-mm@xxxxxxxxx > Cc: linux-kernel@xxxxxxxxxxxxxxx > --- > mm/vmscan.c | 37 ++++++++++++++++++++++++------------- > 1 file changed, 24 insertions(+), 13 deletions(-) > > diff --git a/mm/vmscan.c b/mm/vmscan.c > index 749d239c62b2..ddb29d813d77 100644 > --- a/mm/vmscan.c > +++ b/mm/vmscan.c > @@ -1856,26 +1856,29 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, > while (!list_empty(list)) { > page = lru_to_page(list); > VM_BUG_ON_PAGE(PageLRU(page), page); > + list_del(&page->lru); > if (unlikely(!page_evictable(page))) { > - list_del(&page->lru); > spin_unlock_irq(&pgdat->lru_lock); > putback_lru_page(page); > spin_lock_irq(&pgdat->lru_lock); > continue; > } > - lruvec = mem_cgroup_page_lruvec(page, pgdat); > > + /* > + * The SetPageLRU needs to be kept here for list intergrity. 
> + * Otherwise: > + * #0 mave_pages_to_lru #1 release_pages > + * if (put_page_testzero()) > + * if !put_page_testzero > + * !PageLRU //skip lru_lock > + * list_add(&page->lru,) > + * list_add(&page->lru,) //corrupt > + */ /* * The SetPageLRU needs to be kept here for list integrity. * Otherwise: * #0 move_pages_to_lru #1 release_pages * if !put_page_testzero * if (put_page_testzero()) * !PageLRU //skip lru_lock * SetPageLRU() * list_add(&page->lru,) * list_add(&page->lru,) */ > SetPageLRU(page); > - lru = page_lru(page); > > - nr_pages = hpage_nr_pages(page); > - update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); > - list_move(&page->lru, &lruvec->lists[lru]); > - > - if (put_page_testzero(page)) { > + if (unlikely(put_page_testzero(page))) { > __ClearPageLRU(page); > __ClearPageActive(page); > - del_page_from_lru_list(page, lruvec, lru); > > if (unlikely(PageCompound(page))) { > spin_unlock_irq(&pgdat->lru_lock); > @@ -1883,11 +1886,19 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, > spin_lock_irq(&pgdat->lru_lock); > } else > list_add(&page->lru, &pages_to_free); > - } else { > - nr_moved += nr_pages; > - if (PageActive(page)) > - workingset_age_nonresident(lruvec, nr_pages); > + > + continue; > } > + > + lruvec = mem_cgroup_page_lruvec(page, pgdat); > + lru = page_lru(page); > + nr_pages = hpage_nr_pages(page); > + > + update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); > + list_add(&page->lru, &lruvec->lists[lru]); > + nr_moved += nr_pages; > + if (PageActive(page)) > + workingset_age_nonresident(lruvec, nr_pages); > } > > /* >