The quilt patch titled
     Subject: mm: hugetlb_vmemmap: move mmap lock to vmemmap_remap_range()
has been removed from the -mm tree.  Its filename was
     mm-hugetlb_vmemmap-move-mmap-lock-to-vmemmap_remap_range.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Subject: mm: hugetlb_vmemmap: move mmap lock to vmemmap_remap_range()
Date: Tue, 5 Dec 2023 11:08:53 +0800

All users of vmemmap_remap_range() hold the mmap lock and release it once
the function returns, so it is natural to move the lock into
vmemmap_remap_range() to simplify both the code and its callers.

Link: https://lkml.kernel.org/r/20231205030853.3921-1-songmuchun@xxxxxxxxxxxxx
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/hugetlb_vmemmap.c |   17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

--- a/mm/hugetlb_vmemmap.c~mm-hugetlb_vmemmap-move-mmap-lock-to-vmemmap_remap_range
+++ a/mm/hugetlb_vmemmap.c
@@ -163,8 +163,10 @@ static int vmemmap_remap_range(unsigned
 
 	VM_BUG_ON(!PAGE_ALIGNED(start | end));
 
+	mmap_read_lock(&init_mm);
 	ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
 				    NULL, walk);
+	mmap_read_unlock(&init_mm);
 	if (ret)
 		return ret;
 
@@ -282,7 +284,6 @@ static void vmemmap_restore_pte(pte_t *p
 static int vmemmap_remap_split(unsigned long start, unsigned long end,
 			       unsigned long reuse)
 {
-	int ret;
 	struct vmemmap_remap_walk walk = {
 		.remap_pte	= NULL,
 		.flags		= VMEMMAP_SPLIT_NO_TLB_FLUSH,
@@ -291,11 +292,7 @@ static int vmemmap_remap_split(unsigned
 	/* See the comment in the vmemmap_remap_free(). */
 	BUG_ON(start - reuse != PAGE_SIZE);
 
-	mmap_read_lock(&init_mm);
-	ret = vmemmap_remap_range(reuse, end, &walk);
-	mmap_read_unlock(&init_mm);
-
-	return ret;
+	return vmemmap_remap_range(reuse, end, &walk);
 }
 
 /**
@@ -358,7 +355,6 @@ static int vmemmap_remap_free(unsigned l
 	 */
 	BUG_ON(start - reuse != PAGE_SIZE);
 
-	mmap_read_lock(&init_mm);
 	ret = vmemmap_remap_range(reuse, end, &walk);
 	if (ret && walk.nr_walked) {
 		end = reuse + walk.nr_walked * PAGE_SIZE;
@@ -377,7 +373,6 @@ static int vmemmap_remap_free(unsigned l
 		vmemmap_remap_range(reuse, end, &walk);
 	}
 
-	mmap_read_unlock(&init_mm);
 	return ret;
 }
 
@@ -434,11 +429,7 @@ static int vmemmap_remap_alloc(unsigned
 	if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
 		return -ENOMEM;
 
-	mmap_read_lock(&init_mm);
-	vmemmap_remap_range(reuse, end, &walk);
-	mmap_read_unlock(&init_mm);
-
-	return 0;
+	return vmemmap_remap_range(reuse, end, &walk);
 }
 
 DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
_

Patches currently in -mm which might be from songmuchun@xxxxxxxxxxxxx are
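
For context, the pattern the patch applies — hoisting a lock that every
caller takes identically into the callee itself — can be shown as a minimal
standalone userspace C sketch.  All names below are hypothetical, and a
pthread read-write lock stands in for the kernel's mmap lock on init_mm;
this is not the kernel code, just an illustration of the refactoring.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * After the refactoring, the helper acquires and releases the lock
 * itself, once, around the shared work (standing in for
 * walk_page_range_novma() under mmap_read_lock(&init_mm)).
 */
static int remap_range(long start, long end)
{
	int ret;

	pthread_rwlock_rdlock(&lock);
	ret = (start < end) ? 0 : -1;	/* stands in for the page walk */
	pthread_rwlock_unlock(&lock);
	return ret;
}

/*
 * Callers lose the lock/unlock boilerplate and their local ret
 * variable; they simply return the helper's result, mirroring
 * vmemmap_remap_split() and vmemmap_remap_alloc() in the patch.
 */
static int remap_split(long start, long end)
{
	return remap_range(start, end);
}

int main(void)
{
	printf("remap_split: %d\n", remap_split(0, 4096));
	return 0;
}

The design point the sketch captures: when every caller wraps a helper in
the same lock/unlock pair, moving the pair into the helper removes
duplicated boilerplate and shrinks each caller to a single return, at the
cost of holding the lock for exactly the helper's duration.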