The quilt patch titled
     Subject: mm: remove alloc_pages_vma()
has been removed from the -mm tree.  Its filename was
     mm-remove-alloc_pages_vma.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: mm: remove alloc_pages_vma()

All callers have now been converted to use vma_alloc_folio(), so convert
the body of alloc_pages_vma() to allocate folios instead.

Link: https://lkml.kernel.org/r/20220504182857.4013401-5-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/gfp.h |   18 +++++---------
 mm/mempolicy.c      |   51 ++++++++++++++++++++----------------------
 2 files changed, 32 insertions(+), 37 deletions(-)

--- a/include/linux/gfp.h~mm-remove-alloc_pages_vma
+++ a/include/linux/gfp.h
@@ -613,13 +613,8 @@ static inline struct page *alloc_pages_n
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr,
-			bool hugepage);
 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
@@ -629,16 +624,17 @@ static inline struct folio *folio_alloc(
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
-	alloc_pages(gfp_mask, order)
 #define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
 	folio_alloc(gfp, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+		struct vm_area_struct *vma, unsigned long addr)
+{
+	struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+	return &folio->page;
+}
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
--- a/mm/mempolicy.c~mm-remove-alloc_pages_vma
+++ a/mm/mempolicy.c
@@ -2136,44 +2136,55 @@ static struct page *alloc_pages_preferre
 }
 
 /**
- * alloc_pages_vma - Allocate a page for a VMA.
+ * vma_alloc_folio - Allocate a folio for a VMA.
  * @gfp: GFP flags.
- * @order: Order of the GFP allocation.
+ * @order: Order of the folio.
  * @vma: Pointer to VMA or NULL if not available.
  * @addr: Virtual address of the allocation.  Must be inside @vma.
  * @hugepage: For hugepages try only the preferred node if possible.
  *
- * Allocate a page for a specific address in @vma, using the appropriate
+ * Allocate a folio for a specific address in @vma, using the appropriate
  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
  * of the mm_struct of the VMA to prevent it from going away.  Should be
- * used for all allocations for pages that will be mapped into user space.
+ * used for all allocations for folios that will be mapped into user space.
  *
- * Return: The page on success or NULL if allocation fails.
+ * Return: The folio on success or NULL if allocation fails.
  */
-struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage)
 {
 	struct mempolicy *pol;
 	int node = numa_node_id();
-	struct page *page;
+	struct folio *folio;
 	int preferred_nid;
 	nodemask_t *nmask;
 
 	pol = get_vma_policy(vma, addr);
 
 	if (pol->mode == MPOL_INTERLEAVE) {
+		struct page *page;
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
+		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
+		if (page && order > 1)
+			prep_transhuge_page(page);
+		folio = (struct folio *)page;
 		goto out;
 	}
 
 	if (pol->mode == MPOL_PREFERRED_MANY) {
+		struct page *page;
+
 		node = policy_node(gfp, pol, node);
+		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
+		if (page && order > 1)
+			prep_transhuge_page(page);
+		folio = (struct folio *)page;
 		goto out;
 	}
 
@@ -2200,8 +2211,8 @@ struct page *alloc_pages_vma(gfp_t gfp,
 			 * First, try to allocate THP only on local node, but
 			 * don't reclaim unnecessarily, just compact.
 			 */
-			page = __alloc_pages_node(hpage_node,
-				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
+					__GFP_NORETRY, order, hpage_node);
 
 			/*
 			 * If hugepage allocations are configured to always
@@ -2209,8 +2220,9 @@ struct page *alloc_pages_vma(gfp_t gfp,
 			 * to prefer hugepage backing, retry allowing remote
 			 * memory with both reclaim and compact as well.
 			 */
-			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
-				page = __alloc_pages(gfp, order, hpage_node, nmask);
+			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
+				folio = __folio_alloc(gfp, order, hpage_node,
+						nmask);
 
 			goto out;
 		}
@@ -2218,25 +2230,12 @@ struct page *alloc_pages_vma(gfp_t gfp,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages(gfp, order, preferred_nid, nmask);
+	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
-	return page;
-}
-EXPORT_SYMBOL(alloc_pages_vma);
-
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr, bool hugepage)
-{
-	struct folio *folio;
-
-	folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
-			hugepage);
-	if (folio && order > 1)
-		prep_transhuge_page(&folio->page);
-
 	return folio;
 }
+EXPORT_SYMBOL(vma_alloc_folio);
 
 /**
  * alloc_pages - Allocate pages.
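
For context, the caller-side conversion away from the removed interface is
mechanical: vma_alloc_folio() takes the same arguments as alloc_pages_vma()
did and returns a struct folio instead of a struct page.  The fragment below
is an illustrative sketch only, not part of the patch: the helper name
example_alloc() and the use of GFP_HIGHUSER_MOVABLE were picked for the
example, while the allocator signatures and the &folio->page pattern come
from the diff above.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical caller, for illustration only.  Where code previously did
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *
 * it now allocates a folio against the VMA's NUMA policy and takes
 * &folio->page only where a struct page is still required, mirroring
 * the new inline alloc_page_vma() wrapper in gfp.h.
 */
static struct page *example_alloc(struct vm_area_struct *vma,
				  unsigned long addr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);

	return folio ? &folio->page : NULL;
}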
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

mm-add-vma-iterator.patch
mmap-use-the-vma-iterator-in-count_vma_pages_range.patch
proc-remove-vma-rbtree-use-from-nommu.patch
arm64-remove-mmap-linked-list-from-vdso.patch
parisc-remove-mmap-linked-list-from-cache-handling.patch
powerpc-remove-mmap-linked-list-walks.patch
s390-remove-vma-linked-list-walks.patch
x86-remove-vma-linked-list-walks.patch
xtensa-remove-vma-linked-list-walks.patch
cxl-remove-vma-linked-list-walk.patch
optee-remove-vma-linked-list-walk.patch
um-remove-vma-linked-list-walk.patch
coredump-remove-vma-linked-list-walk.patch
exec-use-vma-iterator-instead-of-linked-list.patch
fs-proc-task_mmu-stop-using-linked-list-and-highest_vm_end.patch
acct-use-vma-iterator-instead-of-linked-list.patch
perf-use-vma-iterator.patch
sched-use-maple-tree-iterator-to-walk-vmas.patch
fork-use-vma-iterator.patch
mm-khugepaged-stop-using-vma-linked-list.patch
mm-ksm-use-vma-iterators-instead-of-vma-linked-list.patch
mm-mlock-use-vma-iterator-and-maple-state-instead-of-vma-linked-list.patch
mm-pagewalk-use-vma_find-instead-of-vma-linked-list.patch
i915-use-the-vma-iterator.patch
nommu-remove-uses-of-vma-linked-list.patch