The quilt patch titled
     Subject: rmap: remove page_unlock_anon_vma_read()
has been removed from the -mm tree.  Its filename was
     rmap-remove-page_unlock_anon_vma_read.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: rmap: remove page_unlock_anon_vma_read()
Date: Fri, 2 Sep 2022 20:46:51 +0100

This was simply an alias for anon_vma_unlock_read() since 2011.

Link: https://lkml.kernel.org/r/20220902194653.1739778-56-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/rmap.h |    5 -----
 mm/memory-failure.c  |    2 +-
 mm/rmap.c            |    5 -----
 3 files changed, 1 insertion(+), 11 deletions(-)

--- a/include/linux/rmap.h~rmap-remove-page_unlock_anon_vma_read
+++ a/include/linux/rmap.h
@@ -458,13 +458,8 @@ struct rmap_walk_control {
 
 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-
-/*
- * Called by memory-failure.c to kill processes.
- */
 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					struct rmap_walk_control *rwc);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 
 #else /* !CONFIG_MMU */
 
--- a/mm/memory-failure.c~rmap-remove-page_unlock_anon_vma_read
+++ a/mm/memory-failure.c
@@ -529,7 +529,7 @@ static void collect_procs_anon(struct pa
 		}
 	}
 	read_unlock(&tasklist_lock);
-	page_unlock_anon_vma_read(av);
+	anon_vma_unlock_read(av);
 }
 
 /*
--- a/mm/rmap.c~rmap-remove-page_unlock_anon_vma_read
+++ a/mm/rmap.c
@@ -599,11 +599,6 @@ out:
 	return anon_vma;
 }
 
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
-{
-	anon_vma_unlock_read(anon_vma);
-}
-
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs.  It is
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are
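
For context only (not part of the patch above): a minimal sketch of the caller
pattern that remains after this cleanup, assuming a folio and an
rmap_walk_control obtained from the surrounding rmap walk.  The anon_vma read
lock taken by folio_lock_anon_vma_read() is now dropped directly with
anon_vma_unlock_read(), which is all the removed wrapper ever did.

	struct anon_vma *av;

	/* Returns the anon_vma with its rwsem held for read, or NULL. */
	av = folio_lock_anon_vma_read(folio, rwc);
	if (av) {
		/* ... walk or inspect the anon_vma here ... */

		/* Unlock directly; page_unlock_anon_vma_read() is gone. */
		anon_vma_unlock_read(av);
	}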