The quilt patch titled
     Subject: khugepaged: pass a folio to __collapse_huge_page_copy()
has been removed from the -mm tree.  Its filename was
     khugepaged-pass-a-folio-to-__collapse_huge_page_copy.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: khugepaged: pass a folio to __collapse_huge_page_copy()
Date: Wed, 3 Apr 2024 18:18:33 +0100

Simplify the body of __collapse_huge_page_copy() while I'm looking at it.

Link: https://lkml.kernel.org/r/20240403171838.1445826-5-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/khugepaged.c |   36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

--- a/mm/khugepaged.c~khugepaged-pass-a-folio-to-__collapse_huge_page_copy
+++ a/mm/khugepaged.c
@@ -767,7 +767,7 @@ static void __collapse_huge_page_copy_fa
  * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
  *
  * @pte: starting of the PTEs to copy from
- * @page: the new hugepage to copy contents to
+ * @folio: the new hugepage to copy contents to
  * @pmd: pointer to the new hugepage's PMD
  * @orig_pmd: the original raw pages' PMD
  * @vma: the original raw pages' virtual memory area
@@ -775,33 +775,29 @@ static void __collapse_huge_page_copy_fa
  * @ptl: lock on raw pages' PTEs
  * @compound_pagelist: list that stores compound pages
  */
-static int __collapse_huge_page_copy(pte_t *pte,
-				     struct page *page,
-				     pmd_t *pmd,
-				     pmd_t orig_pmd,
-				     struct vm_area_struct *vma,
-				     unsigned long address,
-				     spinlock_t *ptl,
-				     struct list_head *compound_pagelist)
-{
-	struct page *src_page;
-	pte_t *_pte;
-	pte_t pteval;
-	unsigned long _address;
+static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
+		pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
+		unsigned long address, spinlock_t *ptl,
+		struct list_head *compound_pagelist)
+{
+	unsigned int i;
 	int result = SCAN_SUCCEED;
 
 	/*
 	 * Copying pages' contents is subject to memory poison at any iteration.
 	 */
-	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, page++, _address += PAGE_SIZE) {
-		pteval = ptep_get(_pte);
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		pte_t pteval = ptep_get(pte + i);
+		struct page *page = folio_page(folio, i);
+		unsigned long src_addr = address + i * PAGE_SIZE;
+		struct page *src_page;
+
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			clear_user_highpage(page, _address);
+			clear_user_highpage(page, src_addr);
 			continue;
 		}
 		src_page = pte_page(pteval);
-		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
+		if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
 			result = SCAN_COPY_MC;
 			break;
 		}
@@ -1196,7 +1192,7 @@ static int collapse_huge_page(struct mm_
 	 */
 	anon_vma_unlock_write(vma->anon_vma);
 
-	result = __collapse_huge_page_copy(pte, &folio->page, pmd, _pmd,
+	result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
 					   vma, address, pte_ptl,
 					   &compound_pagelist);
 	pte_unmap(pte);
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

doc-improve-the-description-of-__folio_mark_dirty.patch
buffer-add-kernel-doc-for-block_dirty_folio.patch
buffer-add-kernel-doc-for-try_to_free_buffers.patch
buffer-fix-__bread-and-__bread_gfp-kernel-doc.patch
buffer-add-kernel-doc-for-brelse-and-__brelse.patch
buffer-add-kernel-doc-for-bforget-and-__bforget.patch
buffer-improve-bdev_getblk-documentation.patch
doc-split-bufferrst-out-of-api-summaryrst.patch
doc-split-bufferrst-out-of-api-summaryrst-fix.patch
mm-memory-failure-remove-fsdax_pgoff-argument-from-__add_to_kill.patch
mm-memory-failure-pass-addr-to-__add_to_kill.patch
mm-return-the-address-from-page_mapped_in_vma.patch
mm-make-page_mapped_in_vma-conditional-on-config_memory_failure.patch
mm-memory-failure-convert-shake_page-to-shake_folio.patch
mm-convert-hugetlb_page_mapping_lock_write-to-folio.patch
mm-memory-failure-convert-memory_failure-to-use-a-folio.patch
mm-memory-failure-convert-hwpoison_user_mappings-to-take-a-folio.patch
mm-memory-failure-add-some-folio-conversions-to-unpoison_memory.patch
mm-memory-failure-use-folio-functions-throughout-collect_procs.patch
mm-memory-failure-pass-the-folio-to-collect_procs_ksm.patch
fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch
f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch
memory-failure-remove-calls-to-page_mapping.patch
migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch
userfault-expand-folio-use-in-mfill_atomic_install_pte.patch
mm-remove-page_mapping.patch
mm-remove-page_cache_alloc.patch
mm-remove-put_devmap_managed_page.patch
mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch
mm-remove-page_ref_sub_return.patch
gup-use-folios-for-gup_devmap.patch
mm-add-kernel-doc-for-folio_mark_accessed.patch
mm-remove-pagereferenced.patch
mm-simplify-thp_vma_allowable_order.patch
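[Editor's note] The core of the diff above is that the copy loop no longer advances three cursors in parallel (_pte, page, _address); a single index i now drives the PTE slot, the destination page (via folio_page()) and the source address.  The fragment below is a minimal, stand-alone user-space sketch of that iteration pattern only.  The struct folio, folio_page() and HPAGE_PMD_NR definitions here are simplified stand-ins invented for illustration, not the kernel's actual definitions, and the value of HPAGE_PMD_NR is shrunk so the output stays short.

/* Sketch of the indexed-iteration pattern adopted by the patch.
 * All types and constants below are simplified stand-ins for
 * illustration; they are not the kernel's definitions.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 8		/* stand-in; 512 on x86-64 with 4K pages */
#define PAGE_SIZE    4096UL

struct page { char data; };
struct folio { struct page pages[HPAGE_PMD_NR]; };

/* stand-in for the kernel's folio_page(folio, n) accessor */
static struct page *folio_page(struct folio *folio, unsigned int n)
{
	return &folio->pages[n];
}

int main(void)
{
	static struct folio folio;
	unsigned long address = 0x7f0000000000UL;
	unsigned int i;

	/* One index derives the destination page and the source address,
	 * mirroring the loop structure after the patch. */
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *page = folio_page(&folio, i);
		unsigned long src_addr = address + i * PAGE_SIZE;

		printf("slot %u: dst page %p, src addr %#lx\n",
		       i, (void *)page, src_addr);
	}
	return 0;
}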