Migration is now completely generalized: the common migration path handles
driver pages like any other movable page.  Remove the driver page fast path
from __unmap_and_move() and the 'newpage' locking from
balloon_page_migrate().

Signed-off-by: Gioh Kim <gioh.kim@xxxxxxx>
---
 mm/balloon_compaction.c |  8 --------
 mm/migrate.c            | 15 ---------------
 2 files changed, 23 deletions(-)

diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index f98a500..d29270aa 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -206,13 +206,6 @@ int balloon_page_migrate(struct address_space *mapping,
 	if (!isolated_balloon_page(page))
 		return rc;
 
-	/*
-	 * Block others from accessing the 'newpage' when we get around to
-	 * establishing additional references. We should be the only one
-	 * holding a reference to the 'newpage' at this point.
-	 */
-	BUG_ON(!trylock_page(newpage));
-
 	if (WARN_ON(!__is_movable_balloon_page(page))) {
 		dump_page(page, "not movable balloon page");
 		unlock_page(newpage);
@@ -222,7 +215,6 @@ int balloon_page_migrate(struct address_space *mapping,
 	if (balloon && balloon->migratepage)
 		rc = balloon->migratepage(balloon, newpage, page, mode);
 
-	unlock_page(newpage);
 	return rc;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 649b1cd..ca47b3e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -844,21 +844,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	}
 
-	if (unlikely(driver_page_migratable(page))) {
-		/*
-		 * A migratable-page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = page->mapping->a_ops->migratepage(page->mapping,
-						       newpage,
-						       page,
-						       mode);
-		goto out_unlock;
-	}
-
 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
-- 
1.9.1
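
For readers who have not followed the whole series, here is a minimal,
standalone sketch (plain C with stub types, not kernel code and not part of
this patch) of the dispatch the removals above rely on.  Only the names
move_to_new_page() and ->migratepage() mirror mm/migrate.c; migrate_anon(),
balloon_migratepage() and the stub structs are invented for illustration.
The point is simply that once every movable page is migrated through its
mapping's ->migratepage() hook, a balloon page needs neither a special
branch in __unmap_and_move() nor its own locking of 'newpage' (which is
assumed to be taken care of by the common path elsewhere in the series).

/*
 * Standalone sketch of a generalized migration dispatch.  Not kernel code:
 * the structs below are stubs and only the overall shape is meant to match
 * move_to_new_page() in mm/migrate.c.
 */
#include <stdio.h>

struct page;
struct address_space;

struct address_space_operations {
	/* one generic hook for every driver that owns movable pages */
	int (*migratepage)(struct address_space *mapping,
			   struct page *newpage, struct page *page);
};

struct address_space {
	const struct address_space_operations *a_ops;
};

struct page {
	struct address_space *mapping;	/* NULL for anonymous pages */
};

/* anonymous pages: plain copy and remap, no driver involvement */
static int migrate_anon(struct page *newpage, struct page *page)
{
	(void)newpage; (void)page;
	printf("anonymous page: copy and remap\n");
	return 0;
}

/* what a balloon driver would plug into ->migratepage (illustrative only) */
static int balloon_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page)
{
	(void)mapping; (void)newpage; (void)page;
	printf("balloon page: moved through the common hook\n");
	return 0;
}

/* the single dispatch point; no per-driver special cases in the caller */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	if (!page->mapping)
		return migrate_anon(newpage, page);
	if (page->mapping->a_ops && page->mapping->a_ops->migratepage)
		return page->mapping->a_ops->migratepage(page->mapping,
							 newpage, page);
	printf("fallback migration\n");
	return 0;
}

int main(void)
{
	static const struct address_space_operations balloon_aops = {
		.migratepage = balloon_migratepage,
	};
	struct address_space balloon_mapping = { .a_ops = &balloon_aops };
	struct page balloon_page = { .mapping = &balloon_mapping };
	struct page anon_page = { .mapping = NULL };
	struct page newpage = { .mapping = NULL };

	move_to_new_page(&newpage, &balloon_page);
	move_to_new_page(&newpage, &anon_page);
	return 0;
}

Built with a plain "gcc -Wall sketch.c", this only prints which branch each
fake page takes; it is meant to show why the special case removed from
__unmap_and_move() above is no longer needed once the generic hook exists.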