'slab_reclaimable' and 'nr_pages' are unsigned, so the subtraction
'slab_reclaimable - nr_pages' wraps around to a huge value whenever
nr_pages > slab_reclaimable, and the nr_reclaimed accounting can
underflow the same way if the slab counter grew concurrently.
Rearrange the loop condition and the accounting so that neither
performs an unsigned subtraction that can underflow.

Cc: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
---
 mm/vmscan.c |   18 ++++++++++++------
 1 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index c927948..b1a90f8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2612,6 +2612,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
 	if (slab_reclaimable > zone->min_slab_pages) {
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
+		unsigned long srec_new;
+
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2622,17 +2624,21 @@
 		 * Note that shrink_slab will free memory on all zones and may
 		 * take a long time.
 		 */
-		while (shrink_slab(sc.nr_scanned, gfp_mask, lru_pages) &&
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
-			;
+		for (;;) {
+			if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+				break;
+			srec_new = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+			if (srec_new + nr_pages <= slab_reclaimable)
+				break;
+		}
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		sc.nr_reclaimed += slab_reclaimable -
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		srec_new = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		if (srec_new < slab_reclaimable)
+			sc.nr_reclaimed += slab_reclaimable - srec_new;
 	}
 
 	p->reclaim_state = NULL;
-- 
1.6.5.2
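
For illustration, here is a minimal standalone userspace sketch of the
failure mode the patch avoids. This is not kernel code and not part of
the patch; the variable names mirror __zone_reclaim() but the values
and the main() wrapper are made up:

/*
 * Standalone sketch of the unsigned underflow (userspace C, made-up
 * values). Build with: cc -std=c99 -o underflow underflow.c
 */
#include <stdio.h>

int main(void)
{
	unsigned long slab_reclaimable = 10;	/* slab pages before reclaim */
	unsigned long nr_pages = 32;		/* pages the caller wants freed */
	unsigned long srec_new = 8;		/* slab pages after one shrink_slab() pass */

	/*
	 * Old loop condition: with nr_pages > slab_reclaimable the
	 * subtraction wraps to a value near ULONG_MAX, so the
	 * "keep reclaiming" test is always false and the loop stops
	 * after a single pass, short of its target.
	 */
	printf("old target: %lu\n", slab_reclaimable - nr_pages);
	printf("old keep-going test: %d\n",
	       srec_new > slab_reclaimable - nr_pages);

	/*
	 * Patched form: rearranged as an addition on the side that
	 * cannot wrap for realistic page counts.
	 */
	printf("new stop test: %d\n", srec_new + nr_pages <= slab_reclaimable);

	/*
	 * Accounting fix: if the slab counter grew concurrently
	 * (srec_new > slab_reclaimable), the old unconditional
	 * subtraction would credit a huge bogus nr_reclaimed; the
	 * patch only adds the difference when it is positive.
	 */
	unsigned long nr_reclaimed = 0;
	if (srec_new < slab_reclaimable)
		nr_reclaimed += slab_reclaimable - srec_new;
	printf("nr_reclaimed: %lu\n", nr_reclaimed);
	return 0;
}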