There are demands to control how hard alloc_contig_range tries to increase allocation success ratio. This patch abstracts it by adding a new enum mode parameter in alloc_contig_range. A new API in the next patch will add a new mode there to control it. This patch shouldn't change any existing behavior. Suggested-by: David Hildenbrand <david@xxxxxxxxxx> Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx> --- drivers/virtio/virtio_mem.c | 2 +- include/linux/gfp.h | 8 +++++++- mm/cma.c | 3 ++- mm/page_alloc.c | 6 ++++-- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 9fc9ec4a25f5..5585fc67b65e 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1148,7 +1148,7 @@ static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages) */ for (retry_count = 0; retry_count < 5; retry_count++) { rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE, - GFP_KERNEL); + GFP_KERNEL, ALLOC_CONTIG_NORMAL); if (rc == -ENOMEM) /* whoops, out of memory */ return rc; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c603237e006c..ad5872699692 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -624,9 +624,15 @@ static inline bool pm_suspended_storage(void) #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_CONTIG_ALLOC +enum alloc_contig_mode { + /* try several ways to increase success ratio of memory allocation */ + ALLOC_CONTIG_NORMAL, +}; + /* The below functions must be run on a range from a single zone. 
*/ extern int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype, gfp_t gfp_mask); + unsigned migratetype, gfp_t gfp_mask, + enum alloc_contig_mode mode); extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #endif diff --git a/mm/cma.c b/mm/cma.c index 3692a34e2353..8010c1ba04b0 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -454,7 +454,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, - GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); + GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0), + ALLOC_CONTIG_NORMAL); if (ret == 0) { page = pfn_to_page(pfn); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f91df593bf71..adfbfd95fbc3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8533,6 +8533,7 @@ static void __alloc_contig_clear_range(unsigned long start_pfn, * be either of the two. * @gfp_mask: GFP mask to use during compaction. __GFP_ZERO clears allocated * pages. + * @mode: how hard it will try to increase allocation success ratio * * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES * aligned. The PFN range must belong to a single zone. @@ -8546,7 +8547,8 @@ static void __alloc_contig_clear_range(unsigned long start_pfn, * need to be freed with free_contig_range(). 
*/ int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype, gfp_t gfp_mask) + unsigned migratetype, gfp_t gfp_mask, + enum alloc_contig_mode mode) { unsigned long outer_start, outer_end; unsigned int order; @@ -8689,7 +8691,7 @@ static int __alloc_contig_pages(unsigned long start_pfn, unsigned long end_pfn = start_pfn + nr_pages; return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, - gfp_mask); + gfp_mask, ALLOC_CONTIG_NORMAL); } static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, -- 2.29.2.454.gaff20da3a2-goog