On Thu, Feb 24, 2022 at 10:00:55AM -0800, Shakeel Butt wrote:
On Thu, Feb 24, 2022 at 9:34 AM Daniel Dao <dqminh@xxxxxxxxxxxxxx> wrote:
[...]
Anyways I am thinking of introducing mem_cgroup_flush_stats_async()
which will schedule flush_memcg_stats_dwork() without delay. Let me
prepare the patch based on 5.15-stable for you to test.
Can you please try the following patch and let me know the results?
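For context, the synchronous helper that workingset_refault() currently
calls looks roughly like this in 5.15 (quoting from memory, from the same
region the second hunk below touches, so please double-check against your
tree):

/* Flushes rstat synchronously in the caller's context once enough
 * per-cpu updates have accumulated.
 */
void mem_cgroup_flush_stats(void)
{
	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
		__mem_cgroup_flush_stats();
}

The async variant added below keeps the same threshold check but only
moves the already-scheduled stats_flush_dwork forward on
system_unbound_wq, so the actual rstat flush happens from the workqueue
instead of the refault path.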
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d9b8df5ef212..cd732d7e00ca 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1002,6 +1002,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 
 void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_async(void);
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			      int val);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32ba963ebf2e..0f298cbd4763 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -682,6 +682,14 @@ void mem_cgroup_flush_stats(void)
 		__mem_cgroup_flush_stats();
 }
 
+void mem_cgroup_flush_stats_async(void)
+{
+	if (atomic_read(&stats_flush_threshold) > num_online_cpus()) {
+		mod_delayed_work(system_unbound_wq, &stats_flush_dwork, 0);
+		atomic_set(&stats_flush_threshold, 0);
+	}
+}
+
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
 	__mem_cgroup_flush_stats();
diff --git a/mm/workingset.c b/mm/workingset.c
index d5b81e4f4cbe..86d43bfc5c63 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -352,7 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
 	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
-	mem_cgroup_flush_stats();
+	mem_cgroup_flush_stats_async();
 	/*
 	 * Compare the distance to the existing workingset size. We
 	 * don't activate pages that couldn't stay resident even if