'slab_reclaimable' and 'nr_pages' are unsigned, so the subtraction is
unsafe: if 'nr_pages' exceeds 'slab_reclaimable', the expression
'slab_reclaimable - nr_pages' wraps around to a huge value and the
shrink loop exits prematurely.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
Acked-by: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>
---
 mm/vmscan.c |   14 +++++++-------
 1 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9c7e57c..8715da1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2588,7 +2588,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
-	unsigned long slab_reclaimable;
+	unsigned long n, m;
 
 	disable_swap_token();
 	cond_resched();
@@ -2615,8 +2615,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
-	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	if (slab_reclaimable > zone->min_slab_pages) {
+	n = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	if (n > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2628,16 +2628,16 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * take a long time.
 		 */
 		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-		       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
+		       (zone_page_state(zone, NR_SLAB_RECLAIMABLE) + nr_pages > n))
 			;
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		sc.nr_reclaimed += slab_reclaimable -
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		m = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		if (m < n)
+			sc.nr_reclaimed += n - m;
 	}
 
 	p->reclaim_state = NULL;
-- 
1.6.5.2
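
For readers outside the kernel tree, here is a minimal userspace sketch of
the two wraparound hazards the patch removes. The variable names and values
are assumptions chosen for illustration; this is not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long before   = 10; /* slab pages before shrink_slab() */
	unsigned long after    = 8;  /* slab pages after shrink_slab()  */
	unsigned long nr_pages = 32; /* reclaim target                  */

	/*
	 * Hazard 1: 'before - nr_pages' wraps to a value near
	 * ULONG_MAX, so the old loop condition evaluates false and
	 * shrinking stops before the target is met.
	 */
	if (after > before - nr_pages)
		printf("old test: keep shrinking\n");
	else
		printf("old test: stop early (wrapped operand)\n");

	/*
	 * Patched form: move nr_pages across the comparison so only
	 * addition is performed; no wrap for realistic page counts.
	 */
	if (after + nr_pages > before)
		printf("new test: keep shrinking\n");

	/*
	 * Hazard 2: if slab usage grew during reclaim, the old
	 * 'before - after' would wrap and wildly inflate the
	 * reclaimed-page count; the patch guards the subtraction.
	 */
	unsigned long nr_reclaimed = 0;
	after = 12; /* pretend the slab count grew meanwhile */
	if (after < before)
		nr_reclaimed += before - after;
	printf("nr_reclaimed = %lu\n", nr_reclaimed);
	return 0;
}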