+ mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5.patch added to -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The patch titled
     Subject: mm/migrate.c: Rework migration_entry_wait() to not take a pageref
has been added to the -mm tree.  Its filename is
     mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Alistair Popple <apopple@xxxxxxxxxx>
Subject: mm/migrate.c: Rework migration_entry_wait() to not take a pageref

 - Documentation/comment fixes from David H
 - Added David's Acked-by
 - Changed functions to take swap entries instead of struct pages as
   suggested by Matthew W

Link: https://lkml.kernel.org/r/20211213033848.1973946-1-apopple@xxxxxxxxxx
Signed-off-by: Alistair Popple <apopple@xxxxxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxx>
Cc: Jerome Glisse <jglisse@xxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Ralph Campbell <rcampbell@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/migrate.h |    2 +-
 mm/filemap.c            |   11 +++++++----
 mm/migrate.c            |   10 ++--------
 3 files changed, 10 insertions(+), 13 deletions(-)

--- a/include/linux/migrate.h~mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5
+++ a/include/linux/migrate.h
@@ -40,7 +40,7 @@ extern int migrate_huge_page_move_mappin
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
-void migration_entry_wait_on_locked(struct folio *folio, pte_t *ptep,
+void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
 				spinlock_t *ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
--- a/mm/filemap.c~mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5
+++ a/mm/filemap.c
@@ -21,6 +21,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swapops.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
@@ -1430,8 +1431,9 @@ repeat:
 #ifdef CONFIG_MIGRATION
 /**
  * migration_entry_wait_on_locked - Wait for a migration entry to be removed
- * @folio: folio referenced by the migration entry.
- * @ptep: mapped pte pointer. This function will return with the ptep unmapped.
+ * @entry: migration swap entry.
+ * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
+ *        for pte entries, pass NULL for pmd entries.
  * @ptl: already locked ptl. This function will drop the lock.
  *
  * Wait for a migration entry referencing the given page to be removed. This is
@@ -1442,10 +1444,10 @@ repeat:
  *
  * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
  *
- * This follows the same logic as wait_on_page_bit_common() so see the comments
+ * This follows the same logic as folio_wait_bit_common() so see the comments
  * there.
  */
-void migration_entry_wait_on_locked(struct folio *folio, pte_t *ptep,
+void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
 				spinlock_t *ptl)
 {
 	struct wait_page_queue wait_page;
@@ -1454,6 +1456,7 @@ void migration_entry_wait_on_locked(stru
 	bool delayacct = false;
 	unsigned long pflags;
 	wait_queue_head_t *q;
+	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 
 	q = folio_waitqueue(folio);
 	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
--- a/mm/migrate.c~mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5
+++ a/mm/migrate.c
@@ -291,7 +291,6 @@ void __migration_entry_wait(struct mm_st
 {
 	pte_t pte;
 	swp_entry_t entry;
-	struct page *page;
 
 	spin_lock(ptl);
 	pte = *ptep;
@@ -302,10 +301,7 @@ void __migration_entry_wait(struct mm_st
 	if (!is_migration_entry(entry))
 		goto out;
 
-	page = pfn_swap_entry_to_page(entry);
-	page = compound_head(page);
-
-	migration_entry_wait_on_locked(page_folio(page), ptep, ptl);
+	migration_entry_wait_on_locked(entry, ptep, ptl);
 	return;
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -330,13 +326,11 @@ void migration_entry_wait_huge(struct vm
 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 {
 	spinlock_t *ptl;
-	struct page *page;
 
 	ptl = pmd_lock(mm, pmd);
 	if (!is_pmd_migration_entry(*pmd))
 		goto unlock;
-	page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
-	migration_entry_wait_on_locked(page_folio(page), NULL, ptl);
+	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
 	return;
 unlock:
 	spin_unlock(ptl);
_

Patches currently in -mm which might be from apopple@xxxxxxxxxx are

mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref.patch
mm-migratec-rework-migration_entry_wait-to-not-take-a-pageref-v5.patch
mm-hmmc-allow-vm_mixedmap-to-work-with-hmm_range_fault.patch




[Index of Archives]     [Kernel Archive]     [IETF Announce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux