This patch adds unbalance flags to struct scan_control and only allows cgroup
reclaim to trigger them. If a flag is set, LRU reclaim will only touch folios
of the target type (anon or file).

Signed-off-by: Huan Yang <link@xxxxxxxx>
---
 mm/vmscan.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cc0cb41fb32..6ed06e73143a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -111,6 +111,10 @@ struct scan_control {
 	/* Proactive reclaim invoked by userspace through memory.reclaim */
 	unsigned int proactive:1;
 
+	/* Unbalance reclaim pages. */
+	unsigned int unbalance_anon:1;
+	unsigned int unbalance_file:1;
+
 	/*
 	 * Cgroup memory below memory.low is protected as long as we
 	 * don't threaten to OOM. If any cgroup is reclaimed at
@@ -2338,6 +2342,18 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
 	}
 }
 
+static __always_inline bool unbalance_anon_reclaim(struct scan_control *sc,
+						   int swappiness)
+{
+	return sc->unbalance_anon && cgroup_reclaim(sc);
+}
+
+static __always_inline bool unbalance_file_reclaim(struct scan_control *sc,
+						   int swappiness)
+{
+	return sc->unbalance_file && cgroup_reclaim(sc);
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.
@@ -2358,6 +2374,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	unsigned long ap, fp;
 	enum lru_list lru;
 
+	if (unlikely(unbalance_file_reclaim(sc, swappiness))) {
+		scan_balance = SCAN_FILE;
+		goto out;
+	}
+
+	if (unlikely(unbalance_anon_reclaim(sc, swappiness))) {
+		scan_balance = SCAN_ANON;
+		goto out;
+	}
+
 	/* If we have no swap space, do not bother scanning anon folios. */
 	if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
 		scan_balance = SCAN_FILE;
-- 
2.34.1
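
For reference, below is a minimal, self-contained sketch (plain userspace C,
not kernel code) of the decision order this patch introduces in
get_scan_count(): the unbalance flags are checked first, but honored only for
cgroup reclaim, before falling back to the existing swap-availability check.
The struct scan_control_model and the pick_scan_balance() helper are
illustrative names only and do not exist in mm/vmscan.c.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of the scan_balance choices used in mm/vmscan.c. */
enum scan_balance { SCAN_EQUAL, SCAN_FRACT, SCAN_ANON, SCAN_FILE };

/*
 * Only the fields relevant to this sketch; the real struct scan_control
 * has many more members.
 */
struct scan_control_model {
	bool cgroup_reclaim;	/* reclaim runs on behalf of a memcg */
	bool unbalance_anon;	/* new flag: reclaim anon folios only */
	bool unbalance_file;	/* new flag: reclaim file folios only */
	bool may_swap;		/* anon reclaim is allowed at all */
};

/*
 * Hypothetical helper modelling the order of checks the patch adds:
 * unbalance flags first (cgroup reclaim only), then the existing
 * "no swap, scan file only" rule, otherwise normal balancing.
 */
static enum scan_balance pick_scan_balance(const struct scan_control_model *sc)
{
	if (sc->unbalance_file && sc->cgroup_reclaim)
		return SCAN_FILE;

	if (sc->unbalance_anon && sc->cgroup_reclaim)
		return SCAN_ANON;

	if (!sc->may_swap)
		return SCAN_FILE;	/* no swap: never scan anon folios */

	return SCAN_FRACT;		/* usual swappiness-based balancing */
}

int main(void)
{
	struct scan_control_model sc = {
		.cgroup_reclaim = true,
		.unbalance_anon = true,
		.may_swap = true,
	};

	printf("scan_balance = %d (2 == SCAN_ANON)\n", pick_scan_balance(&sc));
	return 0;
}

The point the sketch tries to capture is that global (non-cgroup) reclaim
never sees the new flags, so kswapd and direct reclaim keep their existing
anon/file balancing behaviour.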