[PATCH] mm/vmscan: remove unnecessary lru_lock unlock/lock pairs

move_pages_to_lru() drops and re-takes lruvec->lru_lock once for every
unevictable page and once for every compound page whose refcount drops
to zero. When the isolated list contains many mlocked or compound pages,
this results in a lot of redundant unlock/lock pairs.

Instead, collect such pages on local lists inside the loop and handle
them in one batch after the loop, dropping and re-taking the lock only
once.
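
Schematically, the per-page pattern

	spin_unlock_irq(&lruvec->lru_lock);
	putback_lru_page(page);
	spin_lock_irq(&lruvec->lru_lock);

becomes (simplified sketch of the hunks below, not the literal code)

	list_move(&page->lru, &pages_to_putback);
	...
	spin_unlock_irq(&lruvec->lru_lock);
	while (!list_empty(&pages_to_putback)) {
		page = lru_to_page(&pages_to_putback);
		list_del(&page->lru);
		putback_lru_page(page);
	}
	spin_lock_irq(&lruvec->lru_lock);

and likewise for compound pages via destroy_compound_page().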

Signed-off-by: Yongmei Xie <yongmeixie@xxxxxxxxxxx>
---
 mm/vmscan.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 74296c2d1fed..c19c6c572ba3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2156,6 +2156,8 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 {
 	int nr_pages, nr_moved = 0;
 	LIST_HEAD(pages_to_free);
+	LIST_HEAD(pages_to_putback);
+	LIST_HEAD(compound_pages_to_free);
 	struct page *page;
 
 	while (!list_empty(list)) {
@@ -2163,9 +2165,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&lruvec->lru_lock);
-			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
+			list_move(&page->lru, &pages_to_putback);
 			continue;
 		}
 
@@ -2185,11 +2185,9 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 		if (unlikely(put_page_testzero(page))) {
 			__clear_page_lru_flags(page);
 
-			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&lruvec->lru_lock);
-				destroy_compound_page(page);
-				spin_lock_irq(&lruvec->lru_lock);
-			} else
+			if (unlikely(PageCompound(page)))
+				list_move(&page->lru, &compound_pages_to_free);
+			else
 				list_add(&page->lru, &pages_to_free);
 
 			continue;
@@ -2207,6 +2205,26 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 			workingset_age_nonresident(lruvec, nr_pages);
 	}
 
+	/*
+	 * Putback unevictable pages as one batch to avoid an unlock/lock
+	 * pair per page.
+	 */
+	spin_unlock_irq(&lruvec->lru_lock);
+	while (!list_empty(&pages_to_putback)) {
+		page = lru_to_page(&pages_to_putback);
+		list_del(&page->lru);
+		putback_lru_page(page);
+	}
+
+	/*
+	 * Free compound pages as one batch as well, still outside the lock.
+	 */
+	while (!list_empty(&compound_pages_to_free)) {
+		page = lru_to_page(&compound_pages_to_free);
+		list_del(&page->lru);
+		destroy_compound_page(page);
+	}
+	spin_lock_irq(&lruvec->lru_lock);
+
 	/*
 	 * To save our caller's stack, now use input list for pages to free.
 	 */
-- 
2.18.2




