On Tue, Oct 22, 2019 at 10:47:56AM -0400, Johannes Weiner wrote:
> This function currently takes the node or lruvec size and subtracts
> the zones that are excluded by the classzone index of the
> allocation. It uses four different types of counters to do this.
>
> Just add up the eligible zones.
>
> Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
> ---
>  mm/vmscan.c | 21 +++++----------------
>  1 file changed, 5 insertions(+), 16 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 1154b3a2b637..57f533b808f2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -351,32 +351,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
>   */
>  unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
>  {
> -	unsigned long lru_size = 0;
> +	unsigned long size = 0;
>  	int zid;
>
> -	if (!mem_cgroup_disabled()) {
> -		for (zid = 0; zid < MAX_NR_ZONES; zid++)
> -			lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
> -	} else
> -		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
> -
> -	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
> +	for (zid = 0; zid <= zone_idx; zid++) {
>  		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
> -		unsigned long size;
>
>  		if (!managed_zone(zone))
>  			continue;
>
>  		if (!mem_cgroup_disabled())
> -			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
> +			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
>  		else
> -			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
> -					       NR_ZONE_LRU_BASE + lru);
> -		lru_size -= min(size, lru_size);
> +			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
>  	}
> -
> -	return lru_size;
> -
> +	return size;

Neat!

Reviewed-by: Roman Gushchin <guro@xxxxxx>

Thanks!
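
As a side note for anyone following along outside the kernel tree, the
arithmetic being simplified can be sketched in plain userspace C: the
old code built a node-wide total and then subtracted the zones above
the classzone index, while the new code just sums the eligible zones
directly. The zone counts, the MAX_NR_ZONES value and the helper names
below are made up for illustration and are not kernel APIs, and the
sketch ignores the cgroup/!managed_zone distinctions of the real
function.

        #include <stdio.h>

        #define MAX_NR_ZONES    4

        /* Hypothetical per-zone LRU page counts for one node. */
        static const unsigned long zone_lru_pages[MAX_NR_ZONES] = {
                100, 2000, 30000, 0
        };

        /* Old scheme: node-wide total minus the zones above zone_idx. */
        static unsigned long lru_size_by_subtraction(int zone_idx)
        {
                unsigned long lru_size = 0;
                int zid;

                for (zid = 0; zid < MAX_NR_ZONES; zid++)
                        lru_size += zone_lru_pages[zid];

                for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
                        unsigned long size = zone_lru_pages[zid];

                        /* Mirrors the min() clamp against underflow. */
                        lru_size -= size < lru_size ? size : lru_size;
                }
                return lru_size;
        }

        /* New scheme: just add up the eligible zones. */
        static unsigned long lru_size_by_sum(int zone_idx)
        {
                unsigned long size = 0;
                int zid;

                for (zid = 0; zid <= zone_idx; zid++)
                        size += zone_lru_pages[zid];
                return size;
        }

        int main(void)
        {
                int zone_idx;

                for (zone_idx = 0; zone_idx < MAX_NR_ZONES; zone_idx++)
                        printf("classzone %d: old=%lu new=%lu\n", zone_idx,
                               lru_size_by_subtraction(zone_idx),
                               lru_size_by_sum(zone_idx));
                return 0;
        }

With static counts both columns match for every classzone index; in
the kernel the min() clamp only matters because the per-zone counters
can be read while they are changing.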