alloc_gigantic_page() implements an allocation method that scans over various zones looking for a large contiguous memory block which could not have been allocated through the buddy allocator. A subsequent patch which tests arch page table helpers needs such a method to allocate a PUD_SIZE sized memory block. In the future such a method might have other use cases as well. So alloc_gigantic_page() has been split: the actual memory allocation is carved out into a new helper and made available via the new alloc_gigantic_page_order().

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Mark Brown <broonie@xxxxxxxxxx>
Cc: Steven Price <Steven.Price@xxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Masahiro Yamada <yamada.masahiro@xxxxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Sri Krishna chowdary <schowdary@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: Russell King - ARM Linux <linux@xxxxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxxxx>
Cc: James Hogan <jhogan@xxxxxxxxxx>
Cc: Paul Burton <paul.burton@xxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
Cc: Gerald Schaefer <gerald.schaefer@xxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxx>
Cc: linux-snps-arc@xxxxxxxxxxxxxxxxxxx
Cc: linux-mips@xxxxxxxxxxxxxxx
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-ia64@xxxxxxxxxxxxxxx
Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
Cc: linux-s390@xxxxxxxxxxxxxxx
Cc: linux-sh@xxxxxxxxxxxxxxx
Cc: sparclinux@xxxxxxxxxxxxxxx
Cc: x86@xxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
 include/linux/hugetlb.h |  9 +++++++++
 mm/hugetlb.c            | 24 ++++++++++++++++++++++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53fc34f930d0..cc50d5ad4885 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -299,6 +299,9 @@ static inline bool is_file_hugepages(struct file *file)
 }
 
 
+struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+			  int nid, nodemask_t *nodemask);
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)			false
@@ -310,6 +313,12 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+			  int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef37c85423a5..3fb81252f52b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1112,10 +1112,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
-	unsigned int order = huge_page_order(h);
 	unsigned long nr_pages = 1 << order;
 	unsigned long ret, pfn, flags;
 	struct zonelist *zonelist;
@@ -1151,6 +1150,14 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 	return NULL;
 }
 
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+					int nid, nodemask_t *nodemask)
+{
+	unsigned int order = huge_page_order(h);
+
+	return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 #else /* !CONFIG_CONTIG_ALLOC */
@@ -1159,6 +1166,12 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+				       int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
@@ -1167,6 +1180,13 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+				       int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
+
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
-- 
2.20.1

_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@xxxxxxxxxxxxxxxxxxx
http://lists.infradead.org/mailman/listinfo/linux-snps-arc
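
[Editorial note, not part of the patch] A minimal sketch of how a built-in caller, such as the arch page table helpers test mentioned in the commit message, might allocate and release a PUD_SIZE contiguous block through the new alloc_gigantic_page_order(). The wrapper names, the GFP_KERNEL mask, the NULL nodemask and the free_contig_range() pairing are illustrative assumptions and do not come from this series.

/*
 * Hypothetical example only; names and parameter choices here are
 * assumptions for illustration.
 */
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>

static struct page *pud_sized_block_alloc(int nid)
{
	/* PUD_SIZE expressed as an allocation order in base pages */
	unsigned int order = PUD_SHIFT - PAGE_SHIFT;

	/*
	 * GFP_KERNEL and a NULL nodemask are plausible defaults for a
	 * debug/test caller; the zonelist of @nid is scanned for a
	 * contiguous block of 1 << order pages.
	 */
	return alloc_gigantic_page_order(order, GFP_KERNEL, nid, NULL);
}

static void pud_sized_block_free(struct page *page)
{
	/*
	 * Assumes the block was obtained via alloc_contig_range()
	 * underneath, so free_contig_range() is the matching release path.
	 */
	free_contig_range(page_to_pfn(page), 1U << (PUD_SHIFT - PAGE_SHIFT));
}

On configurations where CONFIG_CONTIG_ALLOC (or CONFIG_ARCH_HAS_GIGANTIC_PAGE) is not enabled, alloc_gigantic_page_order() compiles to the NULL stub added above, so such a caller would simply see an allocation failure rather than a build error.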