The HP_Migratable flag indicates a page is a candidate for migration.
Only set the flag if the page's hstate supports migration.  This allows
the migration paths to detect non-migratable pages earlier.  The check
for migration support in unmap_and_move_huge_page can then be removed,
as it is no longer necessary: if migration is not supported for the
hstate, HP_Migratable will not be set, the page will not be isolated,
and no attempt will be made to migrate it.

Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
---
 fs/hugetlbfs/inode.c    |  2 +-
 include/linux/hugetlb.h |  9 +++++++++
 mm/hugetlb.c            |  8 ++++----
 mm/migrate.c            | 12 ------------
 4 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 89bc9062b4f6..14d77d01e38d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -735,7 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-		hugetlb_set_page_flag(page, HP_Migratable);
+		hugetlb_set_HP_Migratable(page);
 		/*
 		 * unlock_page because locked by add_to_page_cache()
 		 * put_page() due to reference from alloc_huge_page()
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 353d81913cc7..e7157cf9967f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -716,6 +716,15 @@ static inline bool hugepage_migration_supported(struct hstate *h)
 	return arch_hugetlb_migration_supported(h);
 }
 
+/*
+ * Only set flag if hstate supports migration
+ */
+static inline void hugetlb_set_HP_Migratable(struct page *page)
+{
+	if (hugepage_migration_supported(page_hstate(page)))
+		hugetlb_set_page_flag(page, HP_Migratable);
+}
+
 /*
  * Movability check is different as compared to migration check.
  * It determines whether or not a huge page should be placed on
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c43cebf2f278..31e896c70ba0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4197,7 +4197,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
-		hugetlb_set_page_flag(new_page, HP_Migratable);
+		hugetlb_set_HP_Migratable(new_page);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -4439,7 +4439,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 * been isolated for migration.
 	 */
 	if (new_page)
-		hugetlb_set_page_flag(page, HP_Migratable);
+		hugetlb_set_HP_Migratable(page);
 
 	unlock_page(page);
 out:
@@ -4750,7 +4750,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
 	spin_unlock(ptl);
-	hugetlb_set_page_flag(page, HP_Migratable);
+	hugetlb_set_HP_Migratable(page);
 	if (vm_shared)
 		unlock_page(page);
 	ret = 0;
@@ -5585,7 +5585,7 @@ void putback_active_hugepage(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 	spin_lock(&hugetlb_lock);
-	hugetlb_set_page_flag(page, HP_Migratable);
+	hugetlb_set_HP_Migratable(page);
 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	put_page(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 0339f3874d7c..296d61613abc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1272,18 +1272,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
 
-	/*
-	 * Migratability of hugepages depends on architectures and their size.
-	 * This check is necessary because some callers of hugepage migration
-	 * like soft offline and memory hotremove don't walk through page
-	 * tables or check whether the hugepage is pmd-based or not before
-	 * kicking migration.
-	 */
-	if (!hugepage_migration_supported(page_hstate(hpage))) {
-		list_move_tail(&hpage->lru, ret);
-		return -ENOSYS;
-	}
-
 	if (page_count(hpage) == 1) {
 		/* page was freed from under us. So we are done. */
 		putback_active_hugepage(hpage);
-- 
2.29.2
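
As a reading aid, and not part of the patch itself, below is a small
stand-alone C model of the behavior the new helper gives the call sites
above.  The struct layouts, the flag representation, and
isolate_for_migration() are simplified stand-ins invented for
illustration; only the check inside hugetlb_set_HP_Migratable() mirrors
the patch.

/*
 * User-space model of the HP_Migratable logic: the flag is set only
 * when the page's hstate supports migration, so isolation (and hence
 * migration) never sees a non-migratable page.  Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum hugetlb_page_flags {
	HP_Migratable = 1 << 0,		/* modeled as a plain bit */
};

struct hstate {
	bool migration_supported;	/* stands in for arch/size checks */
};

struct page {
	struct hstate *hstate;
	unsigned long flags;
};

static bool hugepage_migration_supported(struct hstate *h)
{
	return h->migration_supported;
}

static struct hstate *page_hstate(struct page *page)
{
	return page->hstate;
}

static void hugetlb_set_page_flag(struct page *page, unsigned long flag)
{
	page->flags |= flag;
}

/* Mirrors the new helper: only set the flag if the hstate can migrate. */
static void hugetlb_set_HP_Migratable(struct page *page)
{
	if (hugepage_migration_supported(page_hstate(page)))
		hugetlb_set_page_flag(page, HP_Migratable);
}

/* Isolation model: pages without HP_Migratable are rejected up front. */
static bool isolate_for_migration(struct page *page)
{
	return page->flags & HP_Migratable;
}

int main(void)
{
	struct hstate supported   = { .migration_supported = true };
	struct hstate unsupported = { .migration_supported = false };
	struct page a = { .hstate = &supported };
	struct page b = { .hstate = &unsupported };

	hugetlb_set_HP_Migratable(&a);
	hugetlb_set_HP_Migratable(&b);

	/* Only the page whose hstate supports migration can be isolated. */
	printf("a isolated: %d\n", isolate_for_migration(&a));	/* 1 */
	printf("b isolated: %d\n", isolate_for_migration(&b));	/* 0 */
	return 0;
}

In this model the flag is the only thing isolation looks at, so
suppressing it at set time is enough to keep unsupported pages out of
the migration path entirely, which is why the late check in
unmap_and_move_huge_page can be dropped.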