This patch enables thp migration for move_pages(2).

Signed-off-by: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
---
 mm/migrate.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git v4.9-rc2-mmotm-2016-10-27-18-27/mm/migrate.c v4.9-rc2-mmotm-2016-10-27-18-27_patched/mm/migrate.c
index 97ab8d9..6a589b9 100644
--- v4.9-rc2-mmotm-2016-10-27-18-27/mm/migrate.c
+++ v4.9-rc2-mmotm-2016-10-27-18-27_patched/mm/migrate.c
@@ -1443,7 +1443,17 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 	if (PageHuge(p))
 		return alloc_huge_page_node(page_hstate(compound_head(p)),
 					pm->node);
-	else
+	else if (thp_migration_supported() && PageTransHuge(p)) {
+		struct page *thp;
+
+		thp = alloc_pages_node(pm->node,
+			(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
+			HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
+	} else
 		return __alloc_pages_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }
@@ -1470,6 +1480,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
 		struct vm_area_struct *vma;
 		struct page *page;
+		struct page *head;
+		unsigned int follflags;
 
 		err = -EFAULT;
 		vma = find_vma(mm, pp->addr);
@@ -1477,8 +1489,10 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 			goto set_status;
 
 		/* FOLL_DUMP to ignore special (like zero) pages */
-		page = follow_page(vma, pp->addr,
-				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
+		follflags = FOLL_GET | FOLL_SPLIT | FOLL_DUMP;
+		if (thp_migration_supported())
+			follflags &= ~FOLL_SPLIT;
+		page = follow_page(vma, pp->addr, follflags);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1488,7 +1502,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 		if (!page)
 			goto set_status;
 
-		pp->page = page;
 		err = page_to_nid(page);
 
 		if (err == pp->node)
@@ -1503,16 +1516,22 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 			goto put_and_set;
 
 		if (PageHuge(page)) {
-			if (PageHead(page))
+			if (PageHead(page)) {
 				isolate_huge_page(page, &pagelist);
+				err = 0;
+				pp->page = page;
+			}
 			goto put_and_set;
 		}
 
-		err = isolate_lru_page(page);
+		pp->page = compound_head(page);
+		head = compound_head(page);
+		err = isolate_lru_page(head);
 		if (!err) {
-			list_add_tail(&page->lru, &pagelist);
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					    page_is_file_cache(page));
+			list_add_tail(&head->lru, &pagelist);
+			mod_node_page_state(page_pgdat(head),
+				NR_ISOLATED_ANON + page_is_file_cache(head),
+				hpage_nr_pages(head));
 		}
put_and_set:
 		/*
-- 
2.7.0
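
For reference, a minimal userspace sketch (not part of the patch) of the path this change affects: it maps a PMD-sized anonymous region, hints MADV_HUGEPAGE, and asks move_pages(2) to migrate it to node 1. The destination node and the assumption that the mapping actually ends up THP-backed (which depends on mapping alignment and khugepaged) are illustrative only; build with -lnuma.

/*
 * Sketch only, not part of the patch.  Assumes a two-node NUMA system
 * with THP enabled.  Build: gcc demo.c -lnuma
 */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = 2UL << 20;		/* 2MB, PMD size on x86_64 */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(buf, len, MADV_HUGEPAGE);	/* hint: back this region with a THP */
	memset(buf, 0, len);			/* fault the memory in */

	void *pages[1] = { buf };
	int nodes[1] = { 1 };			/* destination node (assumed to exist) */
	int status[1];

	/*
	 * With this patch applied, a THP backing 'buf' can be migrated as a
	 * whole; previously FOLL_SPLIT forced the huge page to be split first.
	 */
	long ret = move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE);
	if (ret < 0) {
		perror("move_pages");
		return 1;
	}
	printf("status[0] = %d (node on success, negative errno on failure)\n",
	       status[0]);
	return 0;
}

With FOLL_SPLIT cleared in the kernel path above, the huge page is isolated and migrated as a single unit, and the NR_ISOLATED_* counters are adjusted by hpage_nr_pages() rather than by 1.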