Patch "mm: migrate: record the mlocked page status to remove unnecessary lru drain" has been added to the 6.6-stable tree

This is a note to let you know that I've just added the patch titled

    mm: migrate: record the mlocked page status to remove unnecessary lru drain

to the 6.6-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mm-migrate-record-the-mlocked-page-status-to-remove-.patch
and it can be found in the queue-6.6 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit dde8f0daf174b28516ba76ada18614d6f9c62f31
Author: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Date:   Sat Oct 21 12:33:22 2023 +0800

    mm: migrate: record the mlocked page status to remove unnecessary lru drain
    
    [ Upstream commit eebb3dabbb5cc590afe32880b5d3726d0fbf88db ]
    
    When doing compaction, I found that lru_add_drain() is an obvious hotspot
    when migrating pages. The distribution of this hotspot is as follows:
       - 18.75% compact_zone
          - 17.39% migrate_pages
             - 13.79% migrate_pages_batch
                - 11.66% migrate_folio_move
                   - 7.02% lru_add_drain
                      + 7.02% lru_add_drain_cpu
                   + 3.00% move_to_new_folio
                     1.23% rmap_walk
                + 1.92% migrate_folio_unmap
             + 3.20% migrate_pages_sync
          + 0.90% isolate_migratepages
    
    The lru_add_drain() was added by commit c3096e6782b7 ("mm/migrate:
    __unmap_and_move() push good newpage to LRU") to drain the newpage to LRU
    immediately, to help build up the correct newpage->mlock_count in
    remove_migration_ptes() for mlocked pages.  However, if no mlocked pages
    are being migrated, we can avoid this lru drain operation, especially in
    heavily concurrent scenarios.
    
    So we can record the source page's mlocked status in
    migrate_folio_unmap(), and only drain the lru list in
    migrate_folio_move() when that mlocked status is set (a minimal
    standalone sketch of this record/extract pattern follows the diff
    below).
    
    In addition, the page is already isolated from the lru when migrating,
    so the mlocked status checked via folio_test_mlocked() in
    migrate_folio_unmap() is stable.
    
    After this patch, I can see the lru_add_drain() hotspot is gone:
       - 9.41% migrate_pages_batch
          - 6.15% migrate_folio_move
             - 3.64% move_to_new_folio
                + 1.80% migrate_folio_extra
                + 1.70% buffer_migrate_folio
             + 1.41% rmap_walk
             + 0.62% folio_add_lru
          + 3.07% migrate_folio_unmap
    
    Meanwhile, the compaction latency shows some improvements when running
    thpscale:
                                base                   patched
    Amean     fault-both-1      1131.22 (   0.00%)     1112.55 *   1.65%*
    Amean     fault-both-3      2489.75 (   0.00%)     2324.15 *   6.65%*
    Amean     fault-both-5      3257.37 (   0.00%)     3183.18 *   2.28%*
    Amean     fault-both-7      4257.99 (   0.00%)     4079.04 *   4.20%*
    Amean     fault-both-12     6614.02 (   0.00%)     6075.60 *   8.14%*
    Amean     fault-both-18    10607.78 (   0.00%)     8978.86 *  15.36%*
    Amean     fault-both-24    14911.65 (   0.00%)    11619.55 *  22.08%*
    Amean     fault-both-30    14954.67 (   0.00%)    14925.66 *   0.19%*
    Amean     fault-both-32    16654.87 (   0.00%)    15580.31 *   6.45%*
    
    Link: https://lkml.kernel.org/r/06e9153a7a4850352ec36602df3a3a844de45698.1697859741.git.baolin.wang@xxxxxxxxxxxxxxxxx
    Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
    Reviewed-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
    Reviewed-by: Zi Yan <ziy@xxxxxxxxxx>
    Cc: Hugh Dickins <hughd@xxxxxxxxxx>
    Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
    Cc: Vlastimil Babka <vbabka@xxxxxxx>
    Cc: Yin Fengwei <fengwei.yin@xxxxxxxxx>
    Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
    Stable-dep-of: d1adb25df711 ("mm: migrate: fix getting incorrect page mapping during page migration")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/mm/migrate.c b/mm/migrate.c
index 03bc2063ac87..3373fc1c2d0f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1035,22 +1035,28 @@ union migration_ptr {
 	struct anon_vma *anon_vma;
 	struct address_space *mapping;
 };
+
+enum {
+	PAGE_WAS_MAPPED = BIT(0),
+	PAGE_WAS_MLOCKED = BIT(1),
+};
+
 static void __migrate_folio_record(struct folio *dst,
-				   unsigned long page_was_mapped,
+				   unsigned long old_page_state,
 				   struct anon_vma *anon_vma)
 {
 	union migration_ptr ptr = { .anon_vma = anon_vma };
 	dst->mapping = ptr.mapping;
-	dst->private = (void *)page_was_mapped;
+	dst->private = (void *)old_page_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-				   int *page_was_mappedp,
+				   int *old_page_state,
 				   struct anon_vma **anon_vmap)
 {
 	union migration_ptr ptr = { .mapping = dst->mapping };
 	*anon_vmap = ptr.anon_vma;
-	*page_was_mappedp = (unsigned long)dst->private;
+	*old_page_state = (unsigned long)dst->private;
 	dst->mapping = NULL;
 	dst->private = NULL;
 }
@@ -1111,7 +1117,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
 	struct folio *dst;
 	int rc = -EAGAIN;
-	int page_was_mapped = 0;
+	int old_page_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
 	bool locked = false;
@@ -1165,6 +1171,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 		folio_lock(src);
 	}
 	locked = true;
+	if (folio_test_mlocked(src))
+		old_page_state |= PAGE_WAS_MLOCKED;
 
 	if (folio_test_writeback(src)) {
 		/*
@@ -1214,7 +1222,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	dst_locked = true;
 
 	if (unlikely(!is_lru)) {
-		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		__migrate_folio_record(dst, old_page_state, anon_vma);
 		return MIGRATEPAGE_UNMAP;
 	}
 
@@ -1240,11 +1248,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
 			       !folio_test_ksm(src) && !anon_vma, src);
 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-		page_was_mapped = 1;
+		old_page_state |= PAGE_WAS_MAPPED;
 	}
 
 	if (!folio_mapped(src)) {
-		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		__migrate_folio_record(dst, old_page_state, anon_vma);
 		return MIGRATEPAGE_UNMAP;
 	}
 
@@ -1256,7 +1264,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	if (rc == -EAGAIN)
 		ret = NULL;
 
-	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+			       anon_vma, locked, ret);
 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
 	return rc;
@@ -1269,12 +1278,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 			      struct list_head *ret)
 {
 	int rc;
-	int page_was_mapped = 0;
+	int old_page_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
 	struct list_head *prev;
 
-	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 	prev = dst->lru.prev;
 	list_del(&dst->lru);
 
@@ -1295,10 +1304,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
 	folio_add_lru(dst);
-	if (page_was_mapped)
+	if (old_page_state & PAGE_WAS_MLOCKED)
 		lru_add_drain();
 
-	if (page_was_mapped)
+	if (old_page_state & PAGE_WAS_MAPPED)
 		remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
@@ -1330,11 +1339,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 */
 	if (rc == -EAGAIN) {
 		list_add(&dst->lru, prev);
-		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		__migrate_folio_record(dst, old_page_state, anon_vma);
 		return rc;
 	}
 
-	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+			       anon_vma, true, ret);
 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
 	return rc;
@@ -1802,12 +1812,12 @@ static int migrate_pages_batch(struct list_head *from,
 	dst = list_first_entry(&dst_folios, struct folio, lru);
 	dst2 = list_next_entry(dst, lru);
 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int page_was_mapped = 0;
+		int old_page_state = 0;
 		struct anon_vma *anon_vma = NULL;
 
-		__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
-		migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-				       true, ret_folios);
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				       anon_vma, true, ret_folios);
 		list_del(&dst->lru);
 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
 		dst = dst2;

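As a rough illustration of the pattern this patch uses, here is a minimal
standalone userspace sketch (hypothetical helper and struct names, not the
kernel code itself; the real implementation is in the diff above). It packs
the "was mapped" / "was mlocked" flags into an int, stashes that in a
pointer-sized private field at unmap time, and reads it back at move time so
the lru drain only happens when the source page was mlocked:

/*
 * Standalone sketch of the record/extract bit-flag pattern used by the
 * patch. Compile with any C compiler; nothing here is kernel API.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	PAGE_WAS_MAPPED  = 1U << 0,
	PAGE_WAS_MLOCKED = 1U << 1,
};

struct fake_folio {
	void *private;		/* stands in for dst->private */
};

static void record_state(struct fake_folio *dst, int old_page_state)
{
	/* pack the flags into the pointer-sized private field */
	dst->private = (void *)(uintptr_t)old_page_state;
}

static int extract_state(struct fake_folio *dst)
{
	int state = (int)(uintptr_t)dst->private;

	dst->private = NULL;
	return state;
}

int main(void)
{
	struct fake_folio dst = { 0 };
	int old_page_state = 0;

	/* "unmap" side: remember what the source page looked like */
	old_page_state |= PAGE_WAS_MAPPED;
	old_page_state |= PAGE_WAS_MLOCKED;
	record_state(&dst, old_page_state);

	/* "move" side: only drain when the source page was mlocked */
	old_page_state = extract_state(&dst);
	if (old_page_state & PAGE_WAS_MLOCKED)
		printf("source was mlocked: lru_add_drain() would run here\n");
	if (old_page_state & PAGE_WAS_MAPPED)
		printf("source was mapped: migration PTEs would be removed\n");

	return 0;
}

The design point mirrored here is that a single pointer-sized field can carry
several independent facts about the source page, so the move path can make
the lru drain conditional on PAGE_WAS_MLOCKED instead of running it for every
previously mapped page.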


