The patch titled
     Subject: mm/mempolicy: convert queue_pages_hugetlb() to queue_folios_hugetlb()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-mempolicy-convert-queue_pages_hugetlb-to-queue_folios_hugetlb.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-mempolicy-convert-queue_pages_hugetlb-to-queue_folios_hugetlb.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Vishal Moola (Oracle)" <vishal.moola@xxxxxxxxx>
Subject: mm/mempolicy: convert queue_pages_hugetlb() to queue_folios_hugetlb()
Date: Wed, 25 Jan 2023 15:41:32 -0800

This change is in preparation for the conversion of queue_pages_required()
to queue_folio_required() and migrate_page_add() to migrate_folio_add().

Link: https://lkml.kernel.org/r/20230125234134.227244-5-vishal.moola@xxxxxxxxx
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/mm/mempolicy.c~mm-mempolicy-convert-queue_pages_hugetlb-to-queue_folios_hugetlb
+++ a/mm/mempolicy.c
@@ -558,7 +558,7 @@ static int queue_folios_pte_range(pmd_t
 	return addr != end ? -EIO : 0;
 }
 
-static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
 			       unsigned long addr, unsigned long end,
 			       struct mm_walk *walk)
 {
@@ -566,7 +566,7 @@ static int queue_pages_hugetlb(pte_t *pt
 #ifdef CONFIG_HUGETLB_PAGE
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
-	struct page *page;
+	struct folio *folio;
 	spinlock_t *ptl;
 	pte_t entry;
 
@@ -574,13 +574,13 @@ static int queue_pages_hugetlb(pte_t *pt
 	entry = huge_ptep_get(pte);
 	if (!pte_present(entry))
 		goto unlock;
-	page = pte_page(entry);
-	if (!queue_pages_required(page, qp))
+	folio = pfn_folio(pte_pfn(entry));
+	if (!queue_pages_required(&folio->page, qp))
 		goto unlock;
 
 	if (flags == MPOL_MF_STRICT) {
 		/*
-		 * STRICT alone means only detecting misplaced page and no
+		 * STRICT alone means only detecting misplaced folio and no
 		 * need to further check other vma.
 		 */
 		ret = -EIO;
@@ -591,20 +591,27 @@ static int queue_pages_hugetlb(pte_t *pt
 		/*
 		 * Must be STRICT with MOVE*, otherwise .test_walk() have
 		 * stopped walking current vma.
-		 * Detecting misplaced page but allow migrating pages which
+		 * Detecting misplaced folio but allow migrating folios which
 		 * have been queued.
 		 */
 		ret = 1;
 		goto unlock;
 	}
 
-	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	/*
+	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
+	 * is shared it is likely not worth migrating.
+	 *
+	 * To check if the folio is shared, ideally we want to make sure
+	 * every page is mapped to the same process. Doing that is very
+	 * expensive, so check the estimated mapcount of the folio instead.
+	 */
 	if (flags & (MPOL_MF_MOVE_ALL) ||
-	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
-		if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
+	    (flags & MPOL_MF_MOVE && folio_estimated_mapcount(folio) == 1)) {
+		if (isolate_hugetlb(folio, qp->pagelist) &&
 			(flags & MPOL_MF_STRICT))
 			/*
-			 * Failed to isolate page but allow migrating pages
+			 * Failed to isolate folio but allow migrating folios
 			 * which have been queued.
 			 */
 			ret = 1;
@@ -702,7 +709,7 @@ static int queue_pages_test_walk(unsigne
 }
 
 static const struct mm_walk_ops queue_pages_walk_ops = {
-	.hugetlb_entry		= queue_pages_hugetlb,
+	.hugetlb_entry		= queue_folios_hugetlb,
 	.pmd_entry		= queue_folios_pte_range,
 	.test_walk		= queue_pages_test_walk,
 };
_

Patches currently in -mm which might be from vishal.moola@xxxxxxxxx are

mm-khugepaged-introduce-release_pte_folio-to-replace-release_pte_page.patch
mm-khugepaged-convert-release_pte_pages-to-use-folios.patch
pagemap-add-filemap_grab_folio.patch
filemap-added-filemap_get_folios_tag.patch
filemap-convert-__filemap_fdatawait_range-to-use-filemap_get_folios_tag.patch
page-writeback-convert-write_cache_pages-to-use-filemap_get_folios_tag.patch
afs-convert-afs_writepages_region-to-use-filemap_get_folios_tag.patch
btrfs-convert-btree_write_cache_pages-to-use-filemap_get_folio_tag.patch
btrfs-convert-extent_write_cache_pages-to-use-filemap_get_folios_tag.patch
ceph-convert-ceph_writepages_start-to-use-filemap_get_folios_tag.patch
cifs-convert-wdata_alloc_and_fillpages-to-use-filemap_get_folios_tag.patch
ext4-convert-mpage_prepare_extent_to_map-to-use-filemap_get_folios_tag.patch
f2fs-convert-f2fs_fsync_node_pages-to-use-filemap_get_folios_tag.patch
f2fs-convert-f2fs_flush_inline_data-to-use-filemap_get_folios_tag.patch
f2fs-convert-f2fs_sync_node_pages-to-use-filemap_get_folios_tag.patch
f2fs-convert-f2fs_write_cache_pages-to-use-filemap_get_folios_tag.patch
f2fs-convert-last_fsync_dnode-to-use-filemap_get_folios_tag.patch
f2fs-convert-f2fs_sync_meta_pages-to-use-filemap_get_folios_tag.patch
gfs2-convert-gfs2_write_cache_jdata-to-use-filemap_get_folios_tag.patch
nilfs2-convert-nilfs_lookup_dirty_data_buffers-to-use-filemap_get_folios_tag.patch
nilfs2-convert-nilfs_lookup_dirty_node_buffers-to-use-filemap_get_folios_tag.patch
nilfs2-convert-nilfs_btree_lookup_dirty_buffers-to-use-filemap_get_folios_tag.patch
nilfs2-convert-nilfs_copy_dirty_pages-to-use-filemap_get_folios_tag.patch
nilfs2-convert-nilfs_clear_dirty_pages-to-use-filemap_get_folios_tag.patch
filemap-remove-find_get_pages_range_tag.patch
mm-add-folio_estimated_mapcount.patch
mm-mempolicy-convert-queue_pages_pmd-to-queue_folios_pmd.patch
mm-mempolicy-convert-queue_pages_pte_range-to-queue_folios_pte_range.patch
mm-mempolicy-convert-queue_pages_hugetlb-to-queue_folios_hugetlb.patch
mm-mempolicy-convert-queue_pages_required-to-queue_folio_required.patch
mm-mempolicy-convert-migrate_page_add-to-migrate_folio_add.patch
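For anyone wanting to exercise this code path from userspace, below is a
minimal, hypothetical test sketch (not part of the patch): mbind() with
MPOL_MF_MOVE on a MAP_HUGETLB mapping walks the VMA with
queue_pages_walk_ops, whose .hugetlb_entry callback is the
queue_folios_hugetlb() converted above.  It assumes a 2MiB hugepage size,
hugepages already reserved (vm.nr_hugepages > 0), and libnuma's <numaif.h>;
the file name is made up, and error handling is minimal.  Build with -lnuma.

/*
 * test_mbind_hugetlb.c - hypothetical illustration only, not part of the
 * patch.  Binds one hugetlb page to node 0 and asks the kernel to migrate
 * it if misplaced, which drives the mempolicy hugetlb page walk.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed hugepage size */

int main(void)
{
	unsigned long nodemask = 1UL << 0;	/* target NUMA node 0 */
	void *buf;

	buf = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return EXIT_FAILURE;
	}
	memset(buf, 0, HPAGE_SIZE);	/* fault the hugepage in */

	/*
	 * Rebind the range to node 0 and migrate any misplaced pages;
	 * this is one path into the queue_pages_walk_ops page walk.
	 */
	if (mbind(buf, HPAGE_SIZE, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind");

	munmap(buf, HPAGE_SIZE);
	return EXIT_SUCCESS;
}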