On Mon, Jul 03, 2017 at 11:33:02AM -0400, josef@xxxxxxxxxxxxxx wrote: > From: Josef Bacik <jbacik@xxxxxx> > > Following patches will greatly increase our aggressiveness in slab > reclaim, so we need checks in place to make sure we stop trying to > reclaim slab once we've hit our reclaim target. > > Signed-off-by: Josef Bacik <jbacik@xxxxxx> > --- > mm/vmscan.c | 35 ++++++++++++++++++++++++----------- > 1 file changed, 24 insertions(+), 11 deletions(-) > > diff --git a/mm/vmscan.c b/mm/vmscan.c > index cf23de9..77a887a 100644 > --- a/mm/vmscan.c > +++ b/mm/vmscan.c > @@ -305,11 +305,13 @@ EXPORT_SYMBOL(unregister_shrinker); > > #define SHRINK_BATCH 128 > > -static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, > +static unsigned long do_shrink_slab(struct scan_control *sc, > + struct shrink_control *shrinkctl, > struct shrinker *shrinker, > unsigned long nr_scanned, > unsigned long nr_eligible) > { > + struct reclaim_state *reclaim_state = current->reclaim_state; > unsigned long freed = 0; > unsigned long long delta; > long total_scan; > @@ -394,14 +396,18 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, > > shrinkctl->nr_to_scan = nr_to_scan; > ret = shrinker->scan_objects(shrinker, shrinkctl); > + if (reclaim_state) { > + sc->nr_reclaimed += reclaim_state->reclaimed_slab; > + reclaim_state->reclaimed_slab = 0; > + } > if (ret == SHRINK_STOP) > break; > freed += ret; > - > count_vm_events(SLABS_SCANNED, nr_to_scan); > total_scan -= nr_to_scan; > scanned += nr_to_scan; > - > + if (sc->nr_reclaimed >= sc->nr_to_reclaim) > + break; > cond_resched(); > } > > @@ -452,7 +458,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, > * > * Returns the number of reclaimed slab objects. 
> */ > -static unsigned long shrink_slab(gfp_t gfp_mask, int nid, > struct mem_cgroup *memcg, > unsigned long nr_scanned, > unsigned long nr_eligible) > @@ -478,8 +484,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, > } > > list_for_each_entry(shrinker, &shrinker_list, list) { > - struct shrink_control sc = { > - .gfp_mask = gfp_mask, > + struct shrink_control shrinkctl = { > + .gfp_mask = sc->gfp_mask, > .nid = nid, > .memcg = memcg, > }; > @@ -494,9 +500,12 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, > continue; > > if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) > - sc.nid = 0; > + shrinkctl.nid = 0; > - freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); > + freed += do_shrink_slab(sc, &shrinkctl, shrinker, nr_scanned, > + nr_eligible); > + if (sc->nr_to_reclaim <= sc->nr_reclaimed) > + break; > } > Such a bailout ruins fair aging, so a specific shrinker at the head of the list will be exhausted. Also, without fair aging, it is hard to reclaim a slab page that mixes several types of objects. I don't think it's a good idea to bail out after passing a huge, aggressive scan number. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>