On Tue, Sep 29, 2015 at 02:05:07PM -0700, Andrew Morton wrote:
> >  static bool __zone_watermark_ok(struct zone *z, unsigned int order,
> >  			unsigned long mark, int classzone_idx, int alloc_flags,
> > @@ -2317,7 +2319,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
> >  {
> >  	long min = mark;
> >  	int o;
> > -	long free_cma = 0;
> > +	const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
> 
> hmpf. Setting a bool to 0x10 is a bit grubby.
> 

Should be safe, but I see your point. For any other type it would be
truncated and look like a bug.

> >  	/* free_pages may go negative - that's OK */
> >  	free_pages -= (1 << order) - 1;
> > @@ -2330,7 +2332,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
> >  	 * the high-atomic reserves. This will over-estimate the size of the
> >  	 * atomic reserve but it avoids a search.
> >  	 */
> > -	if (likely(!(alloc_flags & ALLOC_HARDER)))
> > +	if (likely(!alloc_harder))
> >  		free_pages -= z->nr_reserved_highatomic;
> >  	else
> >  		min -= min / 4;
> > @@ -2338,22 +2340,43 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
> >  #ifdef CONFIG_CMA
> >  	/* If allocation can't use CMA areas don't use free CMA pages */
> >  	if (!(alloc_flags & ALLOC_CMA))
> > -		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
> > +		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
> >  #endif
> > 
> > -	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
> > +	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
> >  		return false;
> > -	for (o = 0; o < order; o++) {
> > -		/* At the next order, this order's pages become unavailable */
> > -		free_pages -= z->free_area[o].nr_free << o;
> > 
> > -		/* Require fewer higher order pages to be free */
> > -		min >>= 1;
> > +	/* order-0 watermarks are ok */
> 
> because?
> 

The wizard of oz because because! This should fix it up better than
clicking my shoes three times.

---8<---
From: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Subject: [PATCH] mm, page_alloc: only enforce watermarks for order-0 allocations -fix

This patch updates comments for clarity and converts a bool to an int.
The code as-is is ok as the compiler is meant to cast it correctly, but
it looks odd to people who know the value would be truncated and lost
for other types.

This is a fix to the mmotm patch
mm-page_alloc-only-enforce-watermarks-for-order-0-allocations.patch

Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
---
 mm/page_alloc.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 25731624d734..fedec98aafca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2332,7 +2332,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 {
 	long min = mark;
 	int o;
-	const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
+	const int alloc_harder = (alloc_flags & ALLOC_HARDER);
 
 	/* free_pages may go negative - that's OK */
 	free_pages -= (1 << order) - 1;
@@ -2356,14 +2356,19 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
 
+	/*
+	 * Check watermarks for an order-0 allocation request. If these
+	 * are not met, then a high-order request also cannot go ahead
+	 * even if a suitable page happened to be free.
+	 */
 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
 		return false;
 
-	/* order-0 watermarks are ok */
+	/* If this is an order-0 request then the watermark is fine */
 	if (!order)
 		return true;
 
-	/* Check at least one high-order page is free */
+	/* For a high-order request, check at least one suitable page is free */
 	for (o = order; o < MAX_ORDER; o++) {
 		struct free_area *area = &z->free_area[o];
 		int mt;
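
As an aside, not part of the patch above: the "truncated and look like a
bug" concern can be shown with a minimal userspace sketch. Only
ALLOC_HARDER's value of 0x10 comes from the discussion above;
ALLOC_WIDE_FLAG is a made-up flag purely for illustration.

/*
 * Standalone userspace sketch (not kernel code). Demonstrates that
 * converting a nonzero flag mask to bool always yields 1, while a
 * narrower integer type can truncate the mask to 0 and make the flag
 * appear clear.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_HARDER	0x10	/* value mentioned in the review above */
#define ALLOC_WIDE_FLAG	0x100	/* hypothetical flag wider than 8 bits */

int main(void)
{
	int alloc_flags = ALLOC_HARDER | ALLOC_WIDE_FLAG;

	/* bool conversion is well defined: any nonzero value becomes 1 */
	const bool harder = (alloc_flags & ALLOC_HARDER);

	/* unsigned char truncates: 0x100 & 0xff == 0, flag looks clear */
	const unsigned char wide = (alloc_flags & ALLOC_WIDE_FLAG);

	printf("bool:          %d\n", harder);	/* prints 1 */
	printf("unsigned char: %d\n", wide);	/* prints 0 */

	return 0;
}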