Re: [PATCH v4 04/11] mm: vmscan: rework move_pages_to_lru()

On 5/24/22 02:05, Muchun Song wrote:
In a later patch, we will reparent the LRU pages. Pages moved to the
appropriate LRU list can be reparented while move_pages_to_lru() is
running, so it is wrong for the caller to hold a lruvec lock across the
whole operation; instead, use the more general interface
folio_lruvec_relock_irq() to acquire the correct lruvec lock.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
  mm/vmscan.c | 49 +++++++++++++++++++++++++------------------------
  1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1678802e03e7..761d5e0dd78d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2230,23 +2230,28 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
   * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
   * On return, @list is reused as a list of pages to be freed by the caller.
   *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
   */
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
-				      struct list_head *list)
+static unsigned int move_pages_to_lru(struct list_head *list)
  {
-	int nr_pages, nr_moved = 0;
+	int nr_moved = 0;
+	struct lruvec *lruvec = NULL;
  	LIST_HEAD(pages_to_free);
-	struct page *page;

  	while (!list_empty(list)) {
-		page = lru_to_page(list);
+		int nr_pages;
+		struct folio *folio = lru_to_folio(list);
+		struct page *page = &folio->page;
+
+		lruvec = folio_lruvec_relock_irq(folio, lruvec);
  		VM_BUG_ON_PAGE(PageLRU(page), page);
  		list_del(&page->lru);
  		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&lruvec->lru_lock);
+			unlock_page_lruvec_irq(lruvec);
  			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
+			lruvec = NULL;
  			continue;
  		}
@@ -2267,20 +2272,16 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
  			__clear_page_lru_flags(page);

  			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&lruvec->lru_lock);
+				unlock_page_lruvec_irq(lruvec);
  				destroy_compound_page(page);
-				spin_lock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
  			} else
  				list_add(&page->lru, &pages_to_free);

  			continue;
  		}

-		/*
-		 * All pages were isolated from the same lruvec (and isolation
-		 * inhibits memcg migration).
-		 */
-		VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
+		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
  		add_page_to_lru_list(page, lruvec);
  		nr_pages = thp_nr_pages(page);
  		nr_moved += nr_pages;
@@ -2288,6 +2289,8 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
  			workingset_age_nonresident(lruvec, nr_pages);
  	}

+	if (lruvec)
+		unlock_page_lruvec_irq(lruvec);
  	/*
  	 * To save our caller's stack, now use input list for pages to free.
  	 */
@@ -2359,16 +2362,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,

  	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);

-	spin_lock_irq(&lruvec->lru_lock);
-	move_pages_to_lru(lruvec, &page_list);
+	move_pages_to_lru(&page_list);

+	local_irq_disable();
  	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
  	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
  	if (!cgroup_reclaim(sc))
  		__count_vm_events(item, nr_reclaimed);
  	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
  	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();

  	lru_note_cost(lruvec, file, stat.nr_pageout);
  	mem_cgroup_uncharge_list(&page_list);
@@ -2498,18 +2501,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
  	/*
  	 * Move pages back to the lru list.
  	 */
-	spin_lock_irq(&lruvec->lru_lock);
-
-	nr_activate = move_pages_to_lru(lruvec, &l_active);
-	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+	nr_activate = move_pages_to_lru(&l_active);
+	nr_deactivate = move_pages_to_lru(&l_inactive);

  	/* Keep all free pages in l_active list */
  	list_splice(&l_inactive, &l_active);

+	local_irq_disable();
  	__count_vm_events(PGDEACTIVATE, nr_deactivate);
  	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
  	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();

  	mem_cgroup_uncharge_list(&l_active);
  	free_unref_page_list(&l_active);
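
The locking pattern this introduces is worth spelling out. Below is a
minimal sketch of what move_pages_to_lru() now does (simplified from the
patch above; the unevictable and compound-page paths are omitted), using
the same folio_lruvec_relock_irq()/unlock_page_lruvec_irq() helpers as
the diff:

static unsigned int move_pages_to_lru_sketch(struct list_head *list)
{
	struct lruvec *lruvec = NULL;	/* enter with no lruvec lock held */
	unsigned int nr_moved = 0;

	while (!list_empty(list)) {
		struct folio *folio = lru_to_folio(list);

		/*
		 * Lock the lruvec this folio currently belongs to. If the
		 * lock taken in the previous iteration still matches, it
		 * is kept; otherwise it is dropped and the right one is
		 * acquired. This is what makes it safe for a folio to be
		 * reparented to another memcg while the loop runs.
		 */
		lruvec = folio_lruvec_relock_irq(folio, lruvec);

		list_del(&folio->lru);
		add_page_to_lru_list(&folio->page, lruvec);
		nr_moved += folio_nr_pages(folio);
	}

	if (lruvec)	/* drop whichever lock is still held */
		unlock_page_lruvec_irq(lruvec);
	return nr_moved;
}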

Note that the RT engineers will likely change the local_irq_disable()/local_irq_enable() calls to local_lock_irq()/local_unlock_irq().
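
For illustration, that substitution could look roughly like the sketch
below. The per-CPU lock name (vmscan_stats_lock) is made up here just to
show the shape of the local_lock API; the real naming and placement
would be up to whoever does the conversion:

#include <linux/local_lock.h>

/* Hypothetical per-CPU lock protecting the stats update section. */
static DEFINE_PER_CPU(local_lock_t, vmscan_stats_lock) =
	INIT_LOCAL_LOCK(vmscan_stats_lock);

	...
	local_lock_irq(&vmscan_stats_lock);	/* was: local_irq_disable() */
	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	...
	local_unlock_irq(&vmscan_stats_lock);	/* was: local_irq_enable() */

On non-RT kernels this compiles down to plain interrupt disabling (plus
lockdep annotations), while on PREEMPT_RT it becomes a per-CPU sleeping
lock so the section stays preemptible.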

Cheers,
Longman
