The patch titled
     Subject: mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4
has been added to the -mm tree.  Its filename is
     mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Subject: mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4

fix review comments from Vlastimil

Link: http://lkml.kernel.org/r/1563556862-54056-2-git-send-email-yang.shi@xxxxxxxxxxxxxxxxx
Signed-off-by: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/mempolicy.c |   80 +++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 44 deletions(-)

--- a/mm/mempolicy.c~mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4
+++ a/mm/mempolicy.c
@@ -430,10 +430,10 @@ static inline bool queue_pages_required(
 
 /*
  * queue_pages_pmd() has four possible return values:
- * 2 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
  *     specified.
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
+ * 2 - THP was split.
  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
  *        existing page was already on a node that does not follow the
  *        policy.
@@ -454,19 +454,17 @@ static int queue_pages_pmd(pmd_t *pmd, s
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		ret = 2;
 		goto out;
 	}
-	if (!queue_pages_required(page, qp)) {
-		ret = 1;
+	if (!queue_pages_required(page, qp))
 		goto unlock;
-	}
 
-	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 		if (!vma_migratable(walk->vma)) {
-			ret = 2;
+			ret = 1;
 			goto unlock;
 		}
 
@@ -482,6 +480,13 @@ out:
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -498,23 +503,10 @@ static int queue_pages_pte_range(pmd_t *
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		switch (ret) {
-		/* THP was split, fall through to pte walk */
-		case 0:
-			break;
-		/* Pages are placed on the right node or queued successfully */
-		case 1:
-			return 0;
-		/*
-		 * Met unmovable pages, MPOL_MF_MOVE* & MPOL_MF_STRICT
-		 * were specified.
-		 */
-		case 2:
-			return 1;
-		case -EIO:
+		if (ret != 2)
 			return ret;
-		}
 	}
+	/* THP was split, fall through to pte walk */
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -537,7 +529,7 @@ static int queue_pages_pte_range(pmd_t *
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 			/* MPOL_MF_STRICT must be specified if we get here */
 			if (!vma_migratable(vma)) {
-				has_unmovable |= true;
+				has_unmovable = true;
 				break;
 			}
 			migrate_page_add(page, qp->pagelist, flags);
@@ -1276,32 +1268,32 @@ static long do_mbind(unsigned long start
 	ret = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
-	if (ret < 0)
+	if (ret < 0) {
 		err = -EIO;
-	else {
-		err = mbind_range(mm, start, end, new);
+		goto up_out;
+	}
 
-		if (!err) {
-			int nr_failed = 0;
+	err = mbind_range(mm, start, end, new);
 
-			if (!list_empty(&pagelist)) {
-				WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-				nr_failed = migrate_pages(&pagelist, new_page,
-							NULL, start, MIGRATE_SYNC,
-							MR_MEMPOLICY_MBIND);
-				if (nr_failed)
-					putback_movable_pages(&pagelist);
-			}
+	if (!err) {
+		int nr_failed = 0;
 
-			if ((ret > 0) ||
-			    (nr_failed && (flags & MPOL_MF_STRICT)))
-				err = -EIO;
-		} else
-			putback_movable_pages(&pagelist);
-	}
+		if (!list_empty(&pagelist)) {
+			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			if (nr_failed)
+				putback_movable_pages(&pagelist);
+		}
+
+		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+			err = -EIO;
+	} else
+		putback_movable_pages(&pagelist);
 
+up_out:
 	up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
 	mpol_put(new);
 	return err;
 }
_

Patches currently in -mm which might be from yang.shi@xxxxxxxxxxxxxxxxx are

revert-kmemleak-allow-to-coexist-with-fault-injection.patch
mm-vmscan-check-if-mem-cgroup-is-disabled-or-not-before-calling-memcg-slab-shrinker.patch
mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified.patch
mm-mempolicy-make-the-behavior-consistent-when-mpol_mf_move-and-mpol_mf_strict-were-specified-v4.patch
mm-mempolicy-handle-vma-with-unmovable-pages-mapped-correctly-in-mbind.patch
mm-mempolicy-handle-vma-with-unmovable-pages-mapped-correctly-in-mbind-v4.patch
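
For context only (this is not part of the patch): the user-visible effect of
the series is that mbind(2) fails with EIO when MPOL_MF_MOVE* and
MPOL_MF_STRICT are both specified and some pages in the range cannot be
migrated.  A minimal userspace sketch follows; the single-node mask, the
presence of node 0, and linking against libnuma for the mbind() wrapper from
<numaif.h> are assumptions made purely for illustration.

/*
 * Sketch: bind an anonymous mapping to node 0 with MPOL_MF_MOVE |
 * MPOL_MF_STRICT.  After this series, mbind() is expected to return -1
 * with errno == EIO if unmovable pages are met or migration fails.
 * Assumes node 0 exists; build with -lnuma.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	size_t len = 4 * 4096;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return 1;

	unsigned long nodemask = 1UL << 0;	/* node 0 only (assumed present) */

	if (mbind(addr, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
		fprintf(stderr, "mbind: %s\n", strerror(errno));

	munmap(addr, len);
	return 0;
}

Whether the call actually hits the unmovable-page case depends on what is
mapped in the range (e.g. pinned or otherwise non-migratable pages); on a
plain anonymous mapping like the one above it normally succeeds.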