- page-migration-simplify-migrate_pages.patch removed from -mm tree

The patch titled
     page migration: simplify migrate_pages()
has been removed from the -mm tree.  Its filename is
     page-migration-simplify-migrate_pages.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
Subject: page migration: simplify migrate_pages()
From: Christoph Lameter <clameter@xxxxxxx>


Currently migrate_pages() is a mess with lots of gotos.  Extract two functions
from migrate_pages() and get rid of the gotos.

We can also unconditionally set the locked bit on the new page, since we are
the only ones holding a reference to it.  The lock stops others from accessing
the page once we establish additional references to the new page.

Remove the list_del() from move_to_lru() so that callers have finer control
over list processing; the list_del() moves into putback_lru_pages() and
unmap_and_move().
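
With those changes the core of migrate_pages() collapses into a plain retry
loop (condensed from the patch below):

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			if (list_empty(to))
				break;
			cond_resched();
			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
			switch (rc) {
			case -EAGAIN:
				retry++;
				break;
			case 0:
				list_move(&page->lru, moved);
				break;
			default:
				/* Permanent failure */
				list_move(&page->lru, failed);
				nr_failed++;
				break;
			}
		}
	}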

[akpm@xxxxxxxx: add debug check]
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Cc: Hugh Dickins <hugh@xxxxxxxxxxx>
Cc: Jes Sorensen <jes@xxxxxxxxxxxxxxxxxx>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Cc: Lee Schermerhorn <lee.schermerhorn@xxxxxx>
Cc: Andi Kleen <ak@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 mm/migrate.c |  218 +++++++++++++++++++++++++------------------------
 1 file changed, 115 insertions(+), 103 deletions(-)

diff -puN mm/migrate.c~page-migration-simplify-migrate_pages mm/migrate.c
--- a/mm/migrate.c~page-migration-simplify-migrate_pages
+++ a/mm/migrate.c
@@ -84,7 +84,6 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-	list_del(&page->lru);
 	if (PageActive(page)) {
 		/*
 		 * lru_cache_add_active checks that
@@ -110,6 +109,7 @@ int putback_lru_pages(struct list_head *
 	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		list_del(&page->lru);
 		move_to_lru(page);
 		count++;
 	}
@@ -534,11 +534,108 @@ static int fallback_migrate_page(struct 
 }
 
 /*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+	struct address_space *mapping;
+	int rc;
+
+	/*
+	 * Block others from accessing the page when we get around to
+	 * establishing additional references. We are the only one
+	 * holding a reference to the new page at this point.
+	 */
+	if (TestSetPageLocked(newpage))
+		BUG();
+
+	/* Prepare mapping for the new page.*/
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+
+	mapping = page_mapping(page);
+	if (!mapping)
+		rc = migrate_page(mapping, newpage, page);
+	else if (mapping->a_ops->migratepage)
+		/*
+		 * Most pages have a mapping and most filesystems
+		 * should provide a migration function. Anonymous
+		 * pages are part of swap space which also has its
+		 * own migration function. This is the most common
+		 * path for page migration.
+		 */
+		rc = mapping->a_ops->migratepage(mapping,
+						newpage, page);
+	else
+		rc = fallback_migrate_page(mapping, newpage, page);
+
+	if (!rc)
+		remove_migration_ptes(page, newpage);
+	else
+		newpage->mapping = NULL;
+
+	unlock_page(newpage);
+
+	return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(struct page *newpage, struct page *page, int force)
+{
+	int rc = 0;
+
+	if (page_count(page) == 1)
+		/* page was freed from under us. So we are done. */
+		goto ret;
+
+	rc = -EAGAIN;
+	if (TestSetPageLocked(page)) {
+		if (!force)
+			goto ret;
+		lock_page(page);
+	}
+
+	if (PageWriteback(page)) {
+		if (!force)
+			goto unlock;
+		wait_on_page_writeback(page);
+	}
+
+	/*
+	 * Establish migration ptes or remove ptes
+	 */
+	if (try_to_unmap(page, 1) != SWAP_FAIL) {
+		if (!page_mapped(page))
+			rc = move_to_new_page(newpage, page);
+	} else
+		/* A vma has VM_LOCKED set -> permanent failure */
+		rc = -EPERM;
+
+	if (rc)
+		remove_migration_ptes(page, page);
+unlock:
+	unlock_page(page);
+ret:
+	if (rc != -EAGAIN) {
+		list_del(&newpage->lru);
+		move_to_lru(newpage);
+	}
+	return rc;
+}
+
+/*
  * migrate_pages
  *
  * Two lists are passed to this function. The first list
  * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
+ * The second list contains new pages that the isolated pages
  * can be moved to.
  *
  * The function returns after 10 attempts or if no pages
@@ -550,7 +647,7 @@ static int fallback_migrate_page(struct 
 int migrate_pages(struct list_head *from, struct list_head *to,
 		  struct list_head *moved, struct list_head *failed)
 {
-	int retry;
+	int retry = 1;
 	int nr_failed = 0;
 	int pass = 0;
 	struct page *page;
@@ -561,118 +658,33 @@ int migrate_pages(struct list_head *from
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
-redo:
-	retry = 0;
+	for(pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
 
-	list_for_each_entry_safe(page, page2, from, lru) {
-		struct page *newpage = NULL;
-		struct address_space *mapping;
+		list_for_each_entry_safe(page, page2, from, lru) {
 
-		cond_resched();
+			if (list_empty(to))
+				break;
 
-		rc = 0;
-		if (page_count(page) == 1)
-			/* page was freed from under us. So we are done. */
-			goto next;
+			cond_resched();
 
-		if (to && list_empty(to))
-			break;
+			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
 
-		/*
-		 * Skip locked pages during the first two passes to give the
-		 * functions holding the lock time to release the page. Later we
-		 * use lock_page() to have a higher chance of acquiring the
-		 * lock.
-		 */
-		rc = -EAGAIN;
-		if (pass > 2)
-			lock_page(page);
-		else
-			if (TestSetPageLocked(page))
-				goto next;
-
-		/*
-		 * Only wait on writeback if we have already done a pass where
-		 * we we may have triggered writeouts for lots of pages.
-		 */
-		if (pass > 0)
-			wait_on_page_writeback(page);
-		else
-			if (PageWriteback(page))
-				goto unlock_page;
-
-		/*
-		 * Establish migration ptes or remove ptes
-		 */
-		rc = -EPERM;
-		if (try_to_unmap(page, 1) == SWAP_FAIL)
-			/* A vma has VM_LOCKED set -> permanent failure */
-			goto unlock_page;
-
-		rc = -EAGAIN;
-		if (page_mapped(page))
-			goto unlock_page;
-
-		newpage = lru_to_page(to);
-		lock_page(newpage);
-		/* Prepare mapping for the new page.*/
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
-
-		/*
-		 * Pages are properly locked and writeback is complete.
-		 * Try to migrate the page.
-		 */
-		mapping = page_mapping(page);
-		if (!mapping)
-			rc = migrate_page(mapping, newpage, page);
-
-		else if (mapping->a_ops->migratepage)
-			/*
-			 * Most pages have a mapping and most filesystems
-			 * should provide a migration function. Anonymous
-			 * pages are part of swap space which also has its
-			 * own migration function. This is the most common
-			 * path for page migration.
-			 */
-			rc = mapping->a_ops->migratepage(mapping,
-							newpage, page);
-		else
-			rc = fallback_migrate_page(mapping, newpage, page);
-
-		if (!rc)
-			remove_migration_ptes(page, newpage);
-
-		unlock_page(newpage);
-
-unlock_page:
-		if (rc)
-			remove_migration_ptes(page, page);
-
-		unlock_page(page);
-
-next:
-		if (rc) {
-			if (newpage)
-				newpage->mapping = NULL;
-
-			if (rc == -EAGAIN)
+			switch(rc) {
+			case -EAGAIN:
 				retry++;
-			else {
+				break;
+			case 0:
+				list_move(&page->lru, moved);
+				break;
+			default:
 				/* Permanent failure */
 				list_move(&page->lru, failed);
 				nr_failed++;
+				break;
 			}
-		} else {
-			if (newpage) {
-				/* Successful migration. Return page to LRU */
-				move_to_lru(newpage);
-			}
-			list_move(&page->lru, moved);
 		}
 	}
-	if (retry && pass++ < 10)
-		goto redo;
 
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
_
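
For context, a caller of this interface (a simplified, hypothetical sketch;
the real users of this era live in mm/mempolicy.c) isolates the source pages
onto one list, preallocates destination pages onto another, and hands both
lists to migrate_pages():

	LIST_HEAD(pagelist);	/* pages isolated from the LRU */
	LIST_HEAD(newlist);	/* preallocated destination pages */
	LIST_HEAD(moved);	/* receives successfully migrated pages */
	LIST_HEAD(failed);	/* receives permanently failed pages */
	int rc;

	/*
	 * ... isolate_lru_page() each source page onto 'pagelist' and
	 * put enough freshly allocated target pages on 'newlist' ...
	 */

	rc = migrate_pages(&pagelist, &newlist, &moved, &failed);

	/*
	 * Return everything to the LRU.  Note that after this patch
	 * putback_lru_pages() performs the list_del() that move_to_lru()
	 * no longer does.
	 */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);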

Patches currently in -mm which might be from clameter@xxxxxxx are

origin.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
zoned-vm-counters-create-vmstatc-h-from-page_allocc-h.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-tidy.patch
zoned-vm-counters-convert-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure-fix.patch
zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch
zoned-vm-counters-zone_reclaim-remove-proc-sys-vm-zone_reclaim_interval.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetables-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-struct-wbs.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
corrections-to-memory-barrier-doc.patch
