The patch titled
     Subject: mm/cma: remove ALLOC_CMA
has been added to the -mm tree.  Its filename is
     mm-cma-remove-alloc_cma.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-cma-remove-alloc_cma.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-cma-remove-alloc_cma.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Subject: mm/cma: remove ALLOC_CMA

Now all reserved pages for the CMA region belong to ZONE_MOVABLE, and they
only serve requests with GFP_HIGHMEM && GFP_MOVABLE.  Therefore, we no
longer need to maintain ALLOC_CMA at all.

Link: http://lkml.kernel.org/r/1503556593-10720-3-git-send-email-iamjoonsoo.kim@xxxxxxx
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Laura Abbott <lauraa@xxxxxxxxxxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Michal Nazarewicz <mina86@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
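[Not part of the patch: a minimal user-space sketch of the watermark
arithmetic this change simplifies.  The names below (zone_stats,
watermark_ok_old/new) are made up for illustration; only the before/after
logic mirrors the kernel change, in which free CMA pages no longer have to
be subtracted for callers that lack ALLOC_CMA.]

#include <stdbool.h>
#include <stdio.h>

struct zone_stats {
        long free_pages;        /* stand-in for NR_FREE_PAGES */
        long free_cma_pages;    /* stand-in for NR_FREE_CMA_PAGES */
};

/* Old behaviour: free CMA pages only count when the caller passed ALLOC_CMA. */
static bool watermark_ok_old(const struct zone_stats *z, long mark, bool alloc_cma)
{
        long usable = z->free_pages;

        if (!alloc_cma)
                usable -= z->free_cma_pages;
        return usable > mark;
}

/*
 * New behaviour: with CMA pageblocks living in ZONE_MOVABLE, any request
 * that reaches the zone may use those pages, so no adjustment is needed.
 */
static bool watermark_ok_new(const struct zone_stats *z, long mark)
{
        return z->free_pages > mark;
}

int main(void)
{
        struct zone_stats z = { .free_pages = 1000, .free_cma_pages = 600 };

        printf("old, !ALLOC_CMA: %d\n", watermark_ok_old(&z, 500, false)); /* 0: fails  */
        printf("new:             %d\n", watermark_ok_new(&z, 500));        /* 1: passes */
        return 0;
}

The same free-page count that would fail the old check without ALLOC_CMA
passes the new one, which is why the flag and its bookkeeping can go.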
 mm/compaction.c |    4 +---
 mm/internal.h   |    1 -
 mm/page_alloc.c |   28 +++-------------------------
 3 files changed, 4 insertions(+), 29 deletions(-)

diff -puN mm/compaction.c~mm-cma-remove-alloc_cma mm/compaction.c
--- a/mm/compaction.c~mm-cma-remove-alloc_cma
+++ a/mm/compaction.c
@@ -1458,14 +1458,12 @@ static enum compact_result __compaction_
 	 * if compaction succeeds.
 	 * For costly orders, we require low watermark instead of min for
 	 * compaction to proceed to increase its chances.
-	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
-	 * suitable migration targets
 	 */
 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
 				low_wmark_pages(zone) : min_wmark_pages(zone);
 	watermark += compact_gap(order);
 	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-						ALLOC_CMA, wmark_target))
+						0, wmark_target))
 		return COMPACT_SKIPPED;
 
 	return COMPACT_CONTINUE;
diff -puN mm/internal.h~mm-cma-remove-alloc_cma mm/internal.h
--- a/mm/internal.h~mm-cma-remove-alloc_cma
+++ a/mm/internal.h
@@ -497,7 +497,6 @@ unsigned long reclaim_clean_pages_from_l
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff -puN mm/page_alloc.c~mm-cma-remove-alloc_cma mm/page_alloc.c
--- a/mm/page_alloc.c~mm-cma-remove-alloc_cma
+++ a/mm/page_alloc.c
@@ -2720,7 +2720,7 @@ int __isolate_free_page(struct page *pag
 	 * exists.
 	 */
 	watermark = min_wmark_pages(zone) + (1UL << order);
-	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 		return 0;
 
 	__mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -2997,12 +2997,6 @@ bool __zone_watermark_ok(struct zone *z,
 	}
 
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
 	/*
 	 * Check watermarks for an order-0 allocation request. If these
 	 * are not met, then a high-order request also cannot go ahead
@@ -3032,10 +3026,8 @@ bool __zone_watermark_ok(struct zone *z,
 		}
 
 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
 			return true;
-		}
 #endif
 	}
 	return false;
@@ -3052,13 +3044,6 @@ static inline bool zone_watermark_fast(s
 		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
-	long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
 	/*
 	 * Fast check for order-0 only. If this fails then the reserves
@@ -3067,7 +3052,7 @@ static inline bool zone_watermark_fast(s
 	 * the caller is !atomic then it'll uselessly search the free
 	 * list. That corner case is then slower but it is harmless.
 	 */
-	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+	if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
 		return true;
 
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
@@ -3648,10 +3633,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
 	return alloc_flags;
 }
@@ -4127,9 +4108,6 @@ static inline bool prepare_alloc_pages(g
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
-
 	return true;
 }
_

Patches currently in -mm which might be from iamjoonsoo.kim@xxxxxxx are

mm-mlock-use-page_zone-instead-of-page_zone_id.patch
mm-cma-manage-the-memory-of-the-cma-area-by-using-the-zone_movable.patch
mm-cma-remove-alloc_cma.patch
arm-cma-avoid-double-mapping-to-the-cma-area-if-config_highmem-=-y.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html