The patch titled
     Subject: powerpc: convert various functions to use ptdescs
has been added to the -mm mm-unstable branch.  Its filename is
     powerpc-convert-various-functions-to-use-ptdescs.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/powerpc-convert-various-functions-to-use-ptdescs.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Vishal Moola (Oracle)" <vishal.moola@xxxxxxxxx>
Subject: powerpc: convert various functions to use ptdescs
Date: Mon, 31 Jul 2023 10:03:13 -0700

In order to split struct ptdesc from struct page, convert various
functions to use ptdescs.

Link: https://lkml.kernel.org/r/20230731170332.69404-13-vishal.moola@xxxxxxxxx
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Acked-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Dinh Nguyen <dinguyen@xxxxxxxxxx>
Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Geert Uytterhoeven <geert+renesas@xxxxxxxxx>
Cc: Guo Ren <guoren@xxxxxxxxxx>
Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: John Paul Adrian Glaubitz <glaubitz@xxxxxxxxxxxxxxxxxxx>
Cc: Jonas Bonn <jonas@xxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Richard Weinberger <richard@xxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/powerpc/mm/book3s64/mmu_context.c |   10 +--
 arch/powerpc/mm/book3s64/pgtable.c     |   32 ++++++------
 arch/powerpc/mm/pgtable-frag.c         |   58 +++++++++++------------
 3 files changed, 50 insertions(+), 50 deletions(-)

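The conversion follows the same pattern in every hunk below: page table
pages are allocated, initialised and freed through the ptdesc helpers
rather than through bare struct page calls.  As a minimal sketch of that
pattern (illustrative only, not part of the patch; the ptdesc_example_*()
names are invented here, while the helpers are the ones the diff converts
to):

/*
 * Illustrative sketch, not part of this patch: allocate one PTE table
 * page through a ptdesc and release it with the matching helpers.
 */
static pte_t *ptdesc_example_alloc(void)
{
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	/* Callers work with the table's kernel address... */
	return (pte_t *)ptdesc_address(ptdesc);
}

static void ptdesc_example_free(pte_t *pte)
{
	/* ...and map it back to its ptdesc when freeing. */
	struct ptdesc *ptdesc = virt_to_ptdesc(pte);

	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

The same ctor/dtor pairing, with pagetable_pmd_ctor()/pagetable_pmd_dtor(),
applies to the PMD fragment paths in the book3s64 code below.
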
--- a/arch/powerpc/mm/book3s64/mmu_context.c~powerpc-convert-various-functions-to-use-ptdescs
+++ a/arch/powerpc/mm/book3s64/mmu_context.c
@@ -246,15 +246,15 @@ static void destroy_contexts(mm_context_
 static void pmd_frag_destroy(void *pmd_frag)
 {
 	int count;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = virt_to_page(pmd_frag);
+	ptdesc = virt_to_ptdesc(pmd_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_pmd_page_dtor(page);
-		__free_page(page);
+	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
+		pagetable_pmd_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
--- a/arch/powerpc/mm/book3s64/pgtable.c~powerpc-convert-various-functions-to-use-ptdescs
+++ a/arch/powerpc/mm/book3s64/pgtable.c
@@ -384,22 +384,22 @@ static pmd_t *get_pmd_from_cache(struct
 static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 {
 	void *ret = NULL;
-	struct page *page;
+	struct ptdesc *ptdesc;
 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
 
 	if (mm == &init_mm)
 		gfp &= ~__GFP_ACCOUNT;
-	page = alloc_page(gfp);
-	if (!page)
+	ptdesc = pagetable_alloc(gfp, 0);
+	if (!ptdesc)
 		return NULL;
-	if (!pgtable_pmd_page_ctor(page)) {
-		__free_pages(page, 0);
+	if (!pagetable_pmd_ctor(ptdesc)) {
+		pagetable_free(ptdesc);
 		return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	atomic_set(&ptdesc->pt_frag_refcount, 1);
 
-	ret = page_address(page);
+	ret = ptdesc_address(ptdesc);
 	/*
 	 * if we support only one fragment just return the
 	 * allocated page.
@@ -409,12 +409,12 @@ static pmd_t *__alloc_for_pmdcache(struc
 
 	spin_lock(&mm->page_table_lock);
 	/*
-	 * If we find pgtable_page set, we return
+	 * If we find ptdesc_page set, we return
 	 * the allocated page with single fragment
 	 * count.
 	 */
 	if (likely(!mm->context.pmd_frag)) {
-		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
+		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -435,15 +435,15 @@ pmd_t *pmd_fragment_alloc(struct mm_stru
 
 void pmd_fragment_free(unsigned long *pmd)
 {
-	struct page *page = virt_to_page(pmd);
+	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
 
-	if (PageReserved(page))
-		return free_reserved_page(page);
+	if (pagetable_is_reserved(ptdesc))
+		return free_reserved_ptdesc(ptdesc);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
-		pgtable_pmd_page_dtor(page);
-		__free_page(page);
+	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
+		pagetable_pmd_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
--- a/arch/powerpc/mm/pgtable-frag.c~powerpc-convert-various-functions-to-use-ptdescs
+++ a/arch/powerpc/mm/pgtable-frag.c
@@ -18,15 +18,15 @@
 void pte_frag_destroy(void *pte_frag)
 {
 	int count;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = virt_to_page(pte_frag);
+	ptdesc = virt_to_ptdesc(pte_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_pte_page_dtor(page);
-		__free_page(page);
+	if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
+		pagetable_pte_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
@@ -55,25 +55,25 @@ static pte_t *get_pte_from_cache(struct
 static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 {
 	void *ret = NULL;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
 	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
+		if (!ptdesc)
 			return NULL;
-		if (!pgtable_pte_page_ctor(page)) {
-			__free_page(page);
+		if (!pagetable_pte_ctor(ptdesc)) {
+			pagetable_free(ptdesc);
 			return NULL;
 		}
 	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
+		if (!ptdesc)
 			return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	atomic_set(&ptdesc->pt_frag_refcount, 1);
 
-	ret = page_address(page);
+	ret = ptdesc_address(ptdesc);
 	/*
 	 * if we support only one fragment just return the
 	 * allocated page.
@@ -82,12 +82,12 @@ static pte_t *__alloc_for_ptecache(struc
 		return ret;
 	spin_lock(&mm->page_table_lock);
 	/*
-	 * If we find pgtable_page set, we return
+	 * If we find ptdesc_page set, we return
 	 * the allocated page with single fragment
 	 * count.
 	 */
 	if (likely(!pte_frag_get(&mm->context))) {
-		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+		atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR);
 		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -108,28 +108,28 @@ pte_t *pte_fragment_alloc(struct mm_stru
 
 static void pte_free_now(struct rcu_head *head)
 {
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = container_of(head, struct page, rcu_head);
-	pgtable_pte_page_dtor(page);
-	__free_page(page);
+	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
+	pagetable_pte_dtor(ptdesc);
+	pagetable_free(ptdesc);
 }
 
 void pte_fragment_free(unsigned long *table, int kernel)
 {
-	struct page *page = virt_to_page(table);
+	struct ptdesc *ptdesc = virt_to_ptdesc(table);
 
-	if (PageReserved(page))
-		return free_reserved_page(page);
+	if (pagetable_is_reserved(ptdesc))
+		return free_reserved_ptdesc(ptdesc);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
 		if (kernel)
-			__free_page(page);
-		else if (TestClearPageActive(page))
-			call_rcu(&page->rcu_head, pte_free_now);
+			pagetable_free(ptdesc);
+		else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
+			call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
 		else
-			pte_free_now(&page->rcu_head);
+			pte_free_now(&ptdesc->pt_rcu_head);
 	}
 }
_

Patches currently in -mm which might be from vishal.moola@xxxxxxxxx are

mm-add-page_type_op-folio-functions.patch
pgtable-create-struct-ptdesc.patch
mm-add-utility-functions-for-ptdesc.patch
mm-convert-pmd_pgtable_page-callers-to-use-pmd_ptdesc.patch
mm-convert-ptlock_alloc-to-use-ptdescs.patch
mm-convert-ptlock_ptr-to-use-ptdescs.patch
mm-convert-pmd_ptlock_init-to-use-ptdescs.patch
mm-convert-ptlock_init-to-use-ptdescs.patch
mm-convert-pmd_ptlock_free-to-use-ptdescs.patch
mm-convert-ptlock_free-to-use-ptdescs.patch
mm-create-ptdesc-equivalents-for-pgtable_ptepmd_page_ctordtor.patch
powerpc-convert-various-functions-to-use-ptdescs.patch
x86-convert-various-functions-to-use-ptdescs.patch
s390-convert-various-pgalloc-functions-to-use-ptdescs.patch
mm-remove-page-table-members-from-struct-page.patch
pgalloc-convert-various-functions-to-use-ptdescs.patch
arm-convert-various-functions-to-use-ptdescs.patch
arm64-convert-various-functions-to-use-ptdescs.patch
csky-convert-__pte_free_tlb-to-use-ptdescs.patch
hexagon-convert-__pte_free_tlb-to-use-ptdescs.patch
loongarch-convert-various-functions-to-use-ptdescs.patch
m68k-convert-various-functions-to-use-ptdescs.patch
mips-convert-various-functions-to-use-ptdescs.patch
nios2-convert-__pte_free_tlb-to-use-ptdescs.patch
openrisc-convert-__pte_free_tlb-to-use-ptdescs.patch
riscv-convert-alloc_pmd-pte_late-to-use-ptdescs.patch
sh-convert-pte_free_tlb-to-use-ptdescs.patch
sparc64-convert-various-functions-to-use-ptdescs.patch
sparc-convert-pgtable_pte_page_ctor-dtor-to-ptdesc-equivalents.patch
um-convert-pmd-pte_free_tlb-to-use-ptdescs.patch
mm-remove-pgtable_pmd-pte_page_ctor-dtor-wrappers.patch