Currently, in kfree_rcu_shrink_scan(), drain_page_cache() is executed
before kfree_rcu_monitor() in order to drain the page cache. If the
bnode structure's ->gp_snap has already completed, kvfree_rcu_bulk()
will fill the page cache again in kfree_rcu_monitor(). This commit
therefore adds a check of the krcp structure's ->backoff_page_cache_fill
in put_cached_bnode(): if ->backoff_page_cache_fill is set, growing the
page cache is prevented, and page allocation is disabled in
fill_page_cache_func().

Signed-off-by: Zqiang <qiang1.zhang@xxxxxxxxx>
---
 kernel/rcu/tree.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cc34d13be181..9d9d3772cc45 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2908,6 +2908,8 @@ static inline bool
 put_cached_bnode(struct kfree_rcu_cpu *krcp,
 	struct kvfree_rcu_bulk_data *bnode)
 {
+	if (atomic_read(&krcp->backoff_page_cache_fill))
+		return false;
 	// Check the limit.
 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
 		return false;
@@ -3221,7 +3223,7 @@ static void fill_page_cache_func(struct work_struct *work)
 	int i;
 
 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
-		1 : rcu_min_cached_objs;
+		0 : rcu_min_cached_objs;
 
 	for (i = 0; i < nr_pages; i++) {
 		bnode = (struct kvfree_rcu_bulk_data *)
-- 
2.32.0
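
(Not part of the patch.) For anyone who wants to poke at the control flow
in isolation, below is a minimal userspace C sketch of the patched logic.
It is a model under simplifying assumptions: struct krc_model,
RCU_MIN_CACHED_OBJS, fill_page_cache_nr_pages(), and the demo main() are
hypothetical stand-ins invented here, not the kernel's kfree_rcu_cpu,
rcu_min_cached_objs, fill_page_cache_func(), or real list/locking code.

/*
 * Userspace sketch of the patched behavior; simplified, NOT the kernel
 * code. struct krc_model and RCU_MIN_CACHED_OBJS are hypothetical
 * stand-ins for kfree_rcu_cpu and rcu_min_cached_objs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RCU_MIN_CACHED_OBJS 5

struct krc_model {
	atomic_int backoff_page_cache_fill;	/* set under shrinker pressure */
	int nr_bkv_objs;			/* cached page count */
};

/* After the patch: refuse to cache a page while backoff is set. */
static bool put_cached_bnode(struct krc_model *krcp)
{
	if (atomic_load(&krcp->backoff_page_cache_fill))
		return false;
	if (krcp->nr_bkv_objs >= RCU_MIN_CACHED_OBJS)
		return false;
	krcp->nr_bkv_objs++;	/* models llist_add() into the cache */
	return true;
}

/* After the patch: allocate zero pages (was one) while backoff is set. */
static int fill_page_cache_nr_pages(struct krc_model *krcp)
{
	return atomic_load(&krcp->backoff_page_cache_fill) ?
		0 : RCU_MIN_CACHED_OBJS;
}

int main(void)
{
	struct krc_model krcp = { .nr_bkv_objs = 0 };

	atomic_store(&krcp.backoff_page_cache_fill, 1);
	printf("backoff set:   put_cached_bnode=%d, nr_pages=%d\n",
	       put_cached_bnode(&krcp), fill_page_cache_nr_pages(&krcp));

	atomic_store(&krcp.backoff_page_cache_fill, 0);
	printf("backoff clear: put_cached_bnode=%d, nr_pages=%d\n",
	       put_cached_bnode(&krcp), fill_page_cache_nr_pages(&krcp));
	return 0;
}

With backoff set this prints put_cached_bnode=0 and nr_pages=0; with it
clear, 1 and 5. In other words, while the shrinker is draining, neither
path can refill the page cache behind its back.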