From: Zqiang <qiang.zhang@xxxxxxxxxxxxx>

Free each per-CPU krcp's existing page cache when the system is under
memory pressure: the cached pages are released from the kfree_rcu
shrinker's scan callback and accounted for in its count callback.

Signed-off-by: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
---
 kernel/rcu/tree.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c1ae1e52f638..644b0f3c7b9f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3571,17 +3571,41 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
+static int free_krc_page_cache(struct kfree_rcu_cpu *krcp)
+{
+	unsigned long flags;
+	struct llist_node *page_list, *pos, *n;
+	int freed = 0;
+
+	raw_spin_lock_irqsave(&krcp->lock, flags);
+	page_list = llist_del_all(&krcp->bkvcache);
+	krcp->nr_bkv_objs = 0;
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+	llist_for_each_safe(pos, n, page_list) {
+		free_page((unsigned long)pos);
+		freed++;
+	}
+
+	return freed;
+}
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	int cpu;
 	unsigned long count = 0;
+	unsigned long flags;
 
 	/* Snapshot count of all CPUs */
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count += READ_ONCE(krcp->count);
+
+		raw_spin_lock_irqsave(&krcp->lock, flags);
+		count += krcp->nr_bkv_objs;
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 
 	return count;
@@ -3598,6 +3622,8 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count = krcp->count;
+		count += free_krc_page_cache(krcp);
+
 		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (krcp->monitor_todo)
 			kfree_rcu_drain_unlock(krcp, flags);
-- 
2.17.1
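
For readers unfamiliar with the locking pattern used by free_krc_page_cache()
above, below is a minimal, self-contained userspace C sketch of the same idea:
detach the whole cache while holding the lock, then free the detached nodes
outside the lock. This is not kernel code; all names here (struct page_cache,
cache_drain(), etc.) are made up for illustration only.

	/*
	 * Userspace sketch of the "detach under the lock, free outside it"
	 * pattern. Build with: gcc sketch.c -o sketch -lpthread
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct cache_node {
		struct cache_node *next;
	};

	struct page_cache {
		pthread_spinlock_t lock;
		struct cache_node *head;	/* singly linked page cache */
		int nr_objs;
	};

	/* Detach the whole cache under the lock, then free it unlocked. */
	static int cache_drain(struct page_cache *pc)
	{
		struct cache_node *list, *next;
		int freed = 0;

		pthread_spin_lock(&pc->lock);
		list = pc->head;		/* like llist_del_all() */
		pc->head = NULL;
		pc->nr_objs = 0;
		pthread_spin_unlock(&pc->lock);

		while (list) {			/* like llist_for_each_safe() */
			next = list->next;
			free(list);		/* like free_page() */
			freed++;
			list = next;
		}

		return freed;
	}

	int main(void)
	{
		struct page_cache pc = { .head = NULL, .nr_objs = 0 };

		pthread_spin_init(&pc.lock, PTHREAD_PROCESS_PRIVATE);

		/* Populate the cache with a few nodes. */
		for (int i = 0; i < 5; i++) {
			struct cache_node *n = malloc(sizeof(*n));

			if (!n)
				break;
			n->next = pc.head;
			pc.head = n;
			pc.nr_objs++;
		}

		printf("freed %d cached nodes\n", cache_drain(&pc));
		pthread_spin_destroy(&pc.lock);
		return 0;
	}

Freeing outside the lock keeps the critical section short, which is why the
patch collects the llist with llist_del_all() first and only then walks it
with llist_for_each_safe() to release the pages.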