Re: + mm-hugetlb-make-alloc_gigantic_page-available-for-general-use.patch added to -mm tree

On Fri 11-10-19 13:29:32, Andrew Morton wrote:
> alloc_gigantic_page() implements an allocation method that scans over
> various zones looking for a large contiguous memory block which could not
> have been allocated through the buddy allocator.  A subsequent patch which
> tests arch page table helpers needs such a method to allocate a PUD_SIZE
> sized memory block.  In the future such methods might have other use cases
> as well.  So alloc_gigantic_page() has been split, carving the actual
> memory allocation out into a new helper, alloc_gigantic_page_order(),
> which is made available for general use.
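
For context, my reading of what the subsequent test patch would do with the
new helper is something along these lines (a guess on my side, not taken
from that patch; the gfp mask, the node choice and the function name
alloc_pud_sized_block() are assumptions):

#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/topology.h>
#include <asm/pgtable.h>

/* Hypothetical caller of the newly exported helper. */
static struct page *alloc_pud_sized_block(void)
{
	unsigned int order = get_order(PUD_SIZE);

	/* Zone scanning and alloc_contig_range() happen inside the helper. */
	return alloc_gigantic_page_order(order, GFP_KERNEL,
					 numa_node_id(), NULL);
}

The block would presumably be dropped again with
free_contig_range(page_to_pfn(page), 1UL << order).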

You are exporting a helper that hugetlb uses internally. Is this really
what is needed? I haven't followed this patchset, but don't you simply
need a generic 1GB allocator? If so, then you should be looking at
alloc_contig_range() instead.

Or did I miss the point and you really do want a hugetlb page?
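
To illustrate, a generic PUD_SIZE allocation built directly on
alloc_contig_range() could look roughly like this (an untested sketch of
mine; it only walks ZONE_NORMAL of one node, the pfn walk is simplified
and the per-pfn validity checking that alloc_gigantic_page() does is left
out):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/pgtable.h>

/*
 * Sketch only: scan one zone in PUD_SIZE steps until the contig
 * allocator succeeds; the caller frees with free_contig_range().
 */
static struct page *alloc_pud_block(int nid, gfp_t gfp_mask)
{
	unsigned long nr_pages = 1UL << get_order(PUD_SIZE);
	struct zone *zone = &NODE_DATA(nid)->node_zones[ZONE_NORMAL];
	unsigned long pfn = ALIGN(zone->zone_start_pfn, nr_pages);

	for (; pfn + nr_pages <= zone_end_pfn(zone); pfn += nr_pages) {
		if (!alloc_contig_range(pfn, pfn + nr_pages,
					MIGRATE_MOVABLE, gfp_mask))
			return pfn_to_page(pfn);
	}
	return NULL;
}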

> Link: http://lkml.kernel.org/r/1570775142-31425-2-git-send-email-anshuman.khandual@xxxxxxx
> Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
> Cc: Vlastimil Babka <vbabka@xxxxxxx>
> Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
> Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
> Cc: Jason Gunthorpe <jgg@xxxxxxxx>
> Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxxxx>
> Cc: Mark Rutland <mark.rutland@xxxxxxx>
> Cc: Mark Brown <broonie@xxxxxxxxxx>
> Cc: Steven Price <Steven.Price@xxxxxxx>
> Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
> Cc: Masahiro Yamada <yamada.masahiro@xxxxxxxxxxxxx>
> Cc: Kees Cook <keescook@xxxxxxxxxxxx>
> Cc: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx>
> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
> Cc: Sri Krishna chowdary <schowdary@xxxxxxxxxx>
> Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
> Cc: Russell King - ARM Linux <linux@xxxxxxxxxxxxxxx>
> Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
> Cc: Paul Mackerras <paulus@xxxxxxxxx>
> Cc: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>
> Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
> Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
> Cc: Vineet Gupta <vgupta@xxxxxxxxxxxx>
> Cc: James Hogan <jhogan@xxxxxxxxxx>
> Cc: Paul Burton <paul.burton@xxxxxxxx>
> Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
> Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
> Cc: Gerald Schaefer <gerald.schaefer@xxxxxxxxxx>
> Cc: Christophe Leroy <christophe.leroy@xxxxxx>
> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> ---
> 
>  include/linux/hugetlb.h |    9 +++++++++
>  mm/hugetlb.c            |   24 ++++++++++++++++++++++--
>  2 files changed, 31 insertions(+), 2 deletions(-)
> 
> --- a/include/linux/hugetlb.h~mm-hugetlb-make-alloc_gigantic_page-available-for-general-use
> +++ a/include/linux/hugetlb.h
> @@ -299,6 +299,9 @@ static inline bool is_file_hugepages(str
>  }
>  
>  
> +struct page *
> +alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
> +			  int nid, nodemask_t *nodemask);
>  #else /* !CONFIG_HUGETLBFS */
>  
>  #define is_file_hugepages(file)			false
> @@ -310,6 +313,12 @@ hugetlb_file_setup(const char *name, siz
>  	return ERR_PTR(-ENOSYS);
>  }
>  
> +static inline struct page *
> +alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
> +			  int nid, nodemask_t *nodemask)
> +{
> +	return NULL;
> +}
>  #endif /* !CONFIG_HUGETLBFS */
>  
>  #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
> --- a/mm/hugetlb.c~mm-hugetlb-make-alloc_gigantic_page-available-for-general-use
> +++ a/mm/hugetlb.c
> @@ -1112,10 +1112,9 @@ static bool zone_spans_last_pfn(const st
>  	return zone_spans_pfn(zone, last_pfn);
>  }
>  
> -static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
> +struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
>  		int nid, nodemask_t *nodemask)
>  {
> -	unsigned int order = huge_page_order(h);
>  	unsigned long nr_pages = 1 << order;
>  	unsigned long ret, pfn, flags;
>  	struct zonelist *zonelist;
> @@ -1151,6 +1150,14 @@ static struct page *alloc_gigantic_page(
>  	return NULL;
>  }
>  
> +static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
> +					int nid, nodemask_t *nodemask)
> +{
> +	unsigned int order = huge_page_order(h);
> +
> +	return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
> +}
> +
>  static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
>  static void prep_compound_gigantic_page(struct page *page, unsigned int order);
>  #else /* !CONFIG_CONTIG_ALLOC */
> @@ -1159,6 +1166,12 @@ static struct page *alloc_gigantic_page(
>  {
>  	return NULL;
>  }
> +
> +struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
> +				       int nid, nodemask_t *nodemask)
> +{
> +	return NULL;
> +}
>  #endif /* CONFIG_CONTIG_ALLOC */
>  
>  #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
> @@ -1167,6 +1180,13 @@ static struct page *alloc_gigantic_page(
>  {
>  	return NULL;
>  }
> +
> +struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
> +				       int nid, nodemask_t *nodemask)
> +{
> +	return NULL;
> +}
> +
>  static inline void free_gigantic_page(struct page *page, unsigned int order) { }
>  static inline void destroy_compound_gigantic_page(struct page *page,
>  						unsigned int order) { }
> _
> 
> Patches currently in -mm which might be from anshuman.khandual@xxxxxxx are
> 
> mm-hugetlb-make-alloc_gigantic_page-available-for-general-use.patch
> mm-debug-add-tests-validating-architecture-page-table-helpers.patch
> mm-hotplug-reorder-memblock_-calls-in-try_remove_memory.patch

-- 
Michal Hocko
SUSE Labs



