The quilt patch titled
     Subject: mm/sparse-vmemmap: refactor core of vmemmap_populate_basepages() to helper
has been removed from the -mm tree.  Its filename was
     mm-sparse-vmemmap-refactor-core-of-vmemmap_populate_basepages-to-helper.patch

This patch was dropped because it was merged into mm-stable

------------------------------------------------------
From: Joao Martins <joao.m.martins@xxxxxxxxxx>
Subject: mm/sparse-vmemmap: refactor core of vmemmap_populate_basepages() to helper

In preparation for describing a memmap with compound pages, move the
actual pte population logic into a separate function,
vmemmap_populate_address(), and have a new helper,
vmemmap_populate_range(), walk through all the base pages it needs to
populate.

While doing that, change the helper to use a pte_t * as the return value,
rather than a hardcoded errno of 0 or -ENOMEM.

Link: https://lkml.kernel.org/r/20220420155310.9712-3-joao.m.martins@xxxxxxxxxx
Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Jane Chu <jane.chu@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Vishal Verma <vishal.l.verma@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/sparse-vmemmap.c |   53 ++++++++++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 17 deletions(-)

--- a/mm/sparse-vmemmap.c~mm-sparse-vmemmap-refactor-core-of-vmemmap_populate_basepages-to-helper
+++ a/mm/sparse-vmemmap.c
@@ -608,38 +608,57 @@ pgd_t * __meminit vmemmap_pgd_populate(u
 	return pgd;
 }
 
-int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
-					 int node, struct vmem_altmap *altmap)
+static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
+						  struct vmem_altmap *altmap)
 {
-	unsigned long addr = start;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
+	pgd = vmemmap_pgd_populate(addr, node);
+	if (!pgd)
+		return NULL;
+	p4d = vmemmap_p4d_populate(pgd, addr, node);
+	if (!p4d)
+		return NULL;
+	pud = vmemmap_pud_populate(p4d, addr, node);
+	if (!pud)
+		return NULL;
+	pmd = vmemmap_pmd_populate(pud, addr, node);
+	if (!pmd)
+		return NULL;
+	pte = vmemmap_pte_populate(pmd, addr, node, altmap);
+	if (!pte)
+		return NULL;
+	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
+
+	return pte;
+}
+
+static int __meminit vmemmap_populate_range(unsigned long start,
+					    unsigned long end, int node,
+					    struct vmem_altmap *altmap)
+{
+	unsigned long addr = start;
+	pte_t *pte;
+
 	for (; addr < end; addr += PAGE_SIZE) {
-		pgd = vmemmap_pgd_populate(addr, node);
-		if (!pgd)
-			return -ENOMEM;
-		p4d = vmemmap_p4d_populate(pgd, addr, node);
-		if (!p4d)
-			return -ENOMEM;
-		pud = vmemmap_pud_populate(p4d, addr, node);
-		if (!pud)
-			return -ENOMEM;
-		pmd = vmemmap_pmd_populate(pud, addr, node);
-		if (!pmd)
-			return -ENOMEM;
-		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
+		pte = vmemmap_populate_address(addr, node, altmap);
 		if (!pte)
 			return -ENOMEM;
-		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
 	}
 
 	return 0;
 }
 
+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
+					 int node, struct vmem_altmap *altmap)
+{
+	return vmemmap_populate_range(start, end, node, altmap);
+}
+
 struct page * __meminit __populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
 		struct dev_pagemap *pgmap)
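To make the shape of the refactor easy to follow outside the kernel tree,
here is a minimal standalone C sketch of the same pattern.  It is not the
kernel code: populate_one(), populate_range(), populate_basepages() and
FAKE_PAGE_SIZE are made-up stand-ins for vmemmap_populate_address(),
vmemmap_populate_range(), vmemmap_populate_basepages() and PAGE_SIZE.  The
per-address helper returns a pointer (NULL on failure), the range walker
converts a NULL into -ENOMEM, and the pre-existing entry point is left as
a one-line wrapper.

/* Standalone sketch only: none of these names exist in the kernel. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE	4096UL

/* Plays the role of vmemmap_populate_address(): pointer, or NULL on failure. */
static int *populate_one(unsigned long addr)
{
	int *slot = malloc(sizeof(*slot));

	if (!slot)
		return NULL;
	*slot = (int)(addr / FAKE_PAGE_SIZE);	/* stand-in for the populated pte */
	return slot;
}

/* Plays the role of vmemmap_populate_range(): walk the range, map NULL to -ENOMEM. */
static int populate_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += FAKE_PAGE_SIZE) {
		int *slot = populate_one(addr);

		if (!slot)
			return -ENOMEM;
		free(slot);	/* a real caller could keep or reuse the result */
	}
	return 0;
}

/* The original entry point becomes a thin wrapper, as in the patch. */
static int populate_basepages(unsigned long start, unsigned long end)
{
	return populate_range(start, end);
}

int main(void)
{
	int ret = populate_basepages(0, 4 * FAKE_PAGE_SIZE);

	printf("populate_basepages() returned %d\n", ret);
	return ret ? 1 : 0;
}

The point of having the per-address helper hand back the pte_t * itself
rather than an errno is, per the changelog, preparation for compound-page
memmaps: a later caller can presumably inspect or reuse the populated pte,
while plain base-page population only needs to turn a NULL into -ENOMEM
once, inside vmemmap_populate_range().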
_

Patches currently in -mm which might be from joao.m.martins@xxxxxxxxxx are