cb731d6 ("vmscan: per memory cgroup slab shrinkers") sought to optimize accumulating slab reclaim results in sc->nr_reclaimed only once per zone, but the memcg hierarchy walk itself uses sc->nr_reclaimed as an exit condition. This can lead to overreclaim. Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx> --- mm/vmscan.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 27d580b..a02654e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2441,11 +2441,18 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, shrink_lruvec(lruvec, swappiness, sc, &lru_pages); zone_lru_pages += lru_pages; - if (memcg && is_classzone) + if (memcg && is_classzone) { shrink_slab(sc->gfp_mask, zone_to_nid(zone), memcg, sc->nr_scanned - scanned, lru_pages); + if (reclaim_state) { + sc->nr_reclaimed += + reclaim_state->reclaimed_slab; + reclaim_state->reclaimed_slab = 0; + } + } + /* * Direct reclaim and kswapd have to scan all memory * cgroups to fulfill the overall scan target for the @@ -2467,14 +2474,16 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, * Shrink the slab caches in the same proportion that * the eligible LRU pages were scanned. */ - if (global_reclaim(sc) && is_classzone) + if (global_reclaim(sc) && is_classzone) { shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, sc->nr_scanned - nr_scanned, zone_lru_pages); - if (reclaim_state) { - sc->nr_reclaimed += reclaim_state->reclaimed_slab; - reclaim_state->reclaimed_slab = 0; + if (reclaim_state) { + sc->nr_reclaimed += + reclaim_state->reclaimed_slab; + reclaim_state->reclaimed_slab = 0; + } } vmpressure(sc->gfp_mask, sc->target_mem_cgroup, -- 2.6.1 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>