The quilt patch titled
     Subject: mm: convert ptlock_init() to use ptdescs
has been removed from the -mm tree.  Its filename was
     mm-convert-ptlock_init-to-use-ptdescs.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Vishal Moola (Oracle)" <vishal.moola@xxxxxxxxx>
Subject: mm: convert ptlock_init() to use ptdescs
Date: Mon, 7 Aug 2023 16:04:50 -0700

This removes some direct accesses to struct page, working towards
splitting out struct ptdesc from struct page.

Link: https://lkml.kernel.org/r/20230807230513.102486-9-vishal.moola@xxxxxxxxx
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Acked-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Dinh Nguyen <dinguyen@xxxxxxxxxx>
Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Geert Uytterhoeven <geert+renesas@xxxxxxxxx>
Cc: Guo Ren <guoren@xxxxxxxxxx>
Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: John Paul Adrian Glaubitz <glaubitz@xxxxxxxxxxxxxxxxxxx>
Cc: Jonas Bonn <jonas@xxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Richard Weinberger <richard@xxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mm.h |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

--- a/include/linux/mm.h~mm-convert-ptlock_init-to-use-ptdescs
+++ a/include/linux/mm.h
@@ -2858,7 +2858,7 @@ static inline spinlock_t *pte_lockptr(st
 	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
 }
 
-static inline bool ptlock_init(struct page *page)
+static inline bool ptlock_init(struct ptdesc *ptdesc)
 {
 	/*
 	 * prep_new_page() initialize page->private (and therefore page->ptl)
@@ -2867,10 +2867,10 @@ static inline bool ptlock_init(struct pa
 	 * It can happen if arch try to use slab for page table allocation:
 	 * slab code uses page->slab_cache, which share storage with page->ptl.
 	 */
-	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
-	if (!ptlock_alloc(page_ptdesc(page)))
+	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
+	if (!ptlock_alloc(ptdesc))
 		return false;
-	spin_lock_init(ptlock_ptr(page_ptdesc(page)));
+	spin_lock_init(ptlock_ptr(ptdesc));
 	return true;
 }
 
@@ -2883,13 +2883,13 @@ static inline spinlock_t *pte_lockptr(st
 	return &mm->page_table_lock;
 }
 static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
+static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
 static inline void ptlock_free(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static inline bool pgtable_pte_page_ctor(struct page *page)
 {
-	if (!ptlock_init(page))
+	if (!ptlock_init(page_ptdesc(page)))
 		return false;
 	__SetPageTable(page);
 	inc_lruvec_page_state(page, NR_PAGETABLE);
@@ -2964,7 +2964,7 @@ static inline bool pmd_ptlock_init(struc
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ptdesc->pmd_huge_pte = NULL;
 #endif
-	return ptlock_init(ptdesc_page(ptdesc));
+	return ptlock_init(ptdesc);
 }
 
 static inline void pmd_ptlock_free(struct page *page)
_

Patches currently in -mm which might be from vishal.moola@xxxxxxxxx are
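For readers following the series from outside the kernel tree, below is a
minimal, self-contained userspace sketch of the conversion idiom the patch
applies.  The struct layouts and the lock stand-in here are illustrative
assumptions, not the kernel's real definitions; only the
page_ptdesc()/ptdesc_page() cast-helper pattern and the new ptlock_init()
signature mirror the diff above.

/*
 * Sketch of the ptdesc conversion pattern.  NOT kernel code: struct
 * layouts are illustrative stand-ins.  The kernel overlays struct ptdesc
 * on struct page (and builds with -fno-strict-aliasing); we mirror that
 * with identical member layout.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct page: several overlaid users share this storage. */
struct page {
	unsigned long flags;
	unsigned long private;	/* the ptl lives here for PTE pages */
};

/* Stand-in for struct ptdesc: a page-table-specific view of that memory. */
struct ptdesc {
	unsigned long __page_flags;	/* overlays page->flags */
	unsigned long ptl;		/* overlays page->private */
};

/* Cast helpers in the style of the kernel's page_ptdesc()/ptdesc_page(). */
static inline struct ptdesc *page_ptdesc(struct page *page)
{
	return (struct ptdesc *)page;
}

static inline struct page *ptdesc_page(struct ptdesc *ptdesc)
{
	return (struct page *)ptdesc;
}

/* After this patch, ptlock_init() operates on the ptdesc view directly. */
static inline bool ptlock_init(struct ptdesc *ptdesc)
{
	/* Nobody may have taken the shared storage into use in between. */
	assert(ptdesc->ptl == 0);
	ptdesc->ptl = 1;	/* stands in for spin_lock_init() */
	return true;
}

/* Callers that still hold a struct page convert at the call boundary. */
static bool pgtable_pte_page_ctor(struct page *page)
{
	return ptlock_init(page_ptdesc(page));
}

int main(void)
{
	struct page page = { 0 };

	if (pgtable_pte_page_ctor(&page))
		printf("ptl initialised via the ptdesc view: %lu\n",
		       page.private);
	return 0;
}

The design point the sketch illustrates: conversions between the two views
are pushed out to the callers that still traffic in struct page (as in the
pgtable_pte_page_ctor() hunk), so ptlock_init() itself no longer touches
struct page fields at all.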