commit b0943d61b8fa420180f92f64ef67662b4f6cc493 upstream.

THP migration can fail for a variety of reasons.  Avoid flushing the TLB
to deal with THP migration races until the copy is ready to start.

Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Reviewed-by: Rik van Riel <riel@xxxxxxxxxx>
Cc: Alex Thorlton <athorlton@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
---
 mm/huge_memory.c | 7 -------
 mm/migrate.c     | 3 +++
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 07b7eb3..4796245 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1360,13 +1360,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * The page_table_lock above provides a memory barrier
-	 * with change_protection_range.
-	 */
-	if (mm_tlb_flush_pending(mm))
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
-	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.
 	 */
diff --git a/mm/migrate.c b/mm/migrate.c
index 6609413..d455cab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1705,6 +1705,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_fail;
 	}
 
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, mmun_start, mmun_end);
+
 	/* Prepare a page as a migration target */
 	__set_page_locked(new_page);
 	SetPageSwapBacked(new_page);
-- 
1.8.4
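
For illustration only, the sketch below models the pattern the patch moves into
migrate_misplaced_transhuge_page(): check whether a TLB flush is pending and
perform it only immediately before the page copy begins, rather than earlier in
the fault path. This is a standalone userspace mock-up, not kernel code; the
names mm_model, flush_range() and copy_huge_page() are hypothetical stand-ins
for mm_tlb_flush_pending(), flush_tlb_range() and the actual THP copy.

	/* Illustrative sketch only -- hypothetical stand-ins, not kernel internals. */
	#include <stdbool.h>
	#include <stdio.h>

	struct mm_model {
		/* Models mm_tlb_flush_pending(): set while a concurrent
		 * protection change has left stale TLB entries. */
		bool tlb_flush_pending;
	};

	static void flush_range(unsigned long start, unsigned long end)
	{
		/* Stand-in for flush_tlb_range() over the huge page range. */
		printf("flush TLB for [%#lx, %#lx)\n", start, end);
	}

	static void copy_huge_page(void)
	{
		/* Stand-in for copying the THP to the target node. */
		printf("copy THP to target node\n");
	}

	/* Defer the flush: do it only if pending, right before the copy starts. */
	static void migrate_copy(struct mm_model *mm,
				 unsigned long start, unsigned long end)
	{
		if (mm->tlb_flush_pending)
			flush_range(start, end);
		copy_huge_page();
	}

	int main(void)
	{
		struct mm_model mm = { .tlb_flush_pending = true };

		migrate_copy(&mm, 0x200000, 0x400000);
		return 0;
	}

The point of the deferral is that THP migration can bail out for many reasons
before any copy happens; flushing only once the copy is about to start avoids
paying for a TLB flush on attempts that fail early.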