On Sun, Feb 05, 2017 at 11:12:51AM -0500, Zi Yan wrote: > From: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx> > > This patch enables thp migration for move_pages(2). > > Signed-off-by: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx> > --- > mm/migrate.c | 37 ++++++++++++++++++++++++++++--------- > 1 file changed, 28 insertions(+), 9 deletions(-) > > diff --git a/mm/migrate.c b/mm/migrate.c > index 84181a3668c6..9bcaccb481ac 100644 > --- a/mm/migrate.c > +++ b/mm/migrate.c > @@ -1413,7 +1413,17 @@ static struct page *new_page_node(struct page *p, unsigned long private, > if (PageHuge(p)) > return alloc_huge_page_node(page_hstate(compound_head(p)), > pm->node); > - else > + else if (thp_migration_supported() && PageTransHuge(p)) { > + struct page *thp; > + > + thp = alloc_pages_node(pm->node, > + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM, > + HPAGE_PMD_ORDER); > + if (!thp) > + return NULL; > + prep_transhuge_page(thp); > + return thp; > + } else > return __alloc_pages_node(pm->node, > GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); > } > @@ -1440,6 +1450,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm, > for (pp = pm; pp->node != MAX_NUMNODES; pp++) { > struct vm_area_struct *vma; > struct page *page; > + struct page *head; > + unsigned int follflags; > > err = -EFAULT; > vma = find_vma(mm, pp->addr); > @@ -1447,8 +1459,10 @@ static int do_move_page_to_node_array(struct mm_struct *mm, > goto set_status; > > /* FOLL_DUMP to ignore special (like zero) pages */ > - page = follow_page(vma, pp->addr, > - FOLL_GET | FOLL_SPLIT | FOLL_DUMP); > + follflags = FOLL_GET | FOLL_SPLIT | FOLL_DUMP; FOLL_SPLIT should be added depending on thp_migration_supported(). 
Thanks, Naoya Horiguchi > + if (!thp_migration_supported()) > + follflags |= FOLL_SPLIT; > + page = follow_page(vma, pp->addr, follflags); > > err = PTR_ERR(page); > if (IS_ERR(page)) > @@ -1458,7 +1472,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm, > if (!page) > goto set_status; > > - pp->page = page; > err = page_to_nid(page); > > if (err == pp->node) > @@ -1473,16 +1486,22 @@ static int do_move_page_to_node_array(struct mm_struct *mm, > goto put_and_set; > > if (PageHuge(page)) { > - if (PageHead(page)) > + if (PageHead(page)) { > isolate_huge_page(page, &pagelist); > + err = 0; > + pp->page = page; > + } > goto put_and_set; > } > > - err = isolate_lru_page(page); > + pp->page = compound_head(page); > + head = compound_head(page); > + err = isolate_lru_page(head); > if (!err) { > - list_add_tail(&page->lru, &pagelist); > - inc_node_page_state(page, NR_ISOLATED_ANON + > - page_is_file_cache(page)); > + list_add_tail(&head->lru, &pagelist); > + mod_node_page_state(page_pgdat(head), > + NR_ISOLATED_ANON + page_is_file_cache(head), > + hpage_nr_pages(head)); > } > put_and_set: > /* > -- > 2.11.0 > -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>