On ChromeOS, using this with the increased timeout, we see that we almost never need to initiate a new grace period. Testing also shows this frees large amounts of unreclaimed memory under intense kfree_rcu() pressure.

Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
---
v1->v2: Same logic but use polled grace periods instead of sampling gp_seq.

 kernel/rcu/tree.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 591187b6352e..ed41243f7a49 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2935,6 +2935,7 @@ struct kfree_rcu_cpu_work {
 
 /**
  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
+ * @gp_snap: The GP snapshot recorded at the last scheduling of monitor work.
  * @head: List of kfree_rcu() objects not yet waiting for a grace period
  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
@@ -2964,6 +2965,7 @@ struct kfree_rcu_cpu {
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
+	unsigned long gp_snap;
 	bool initialized;
 	int count;
 
@@ -3167,6 +3169,7 @@ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
 		mod_delayed_work(system_wq, &krcp->monitor_work, delay);
 		return;
 	}
+	krcp->gp_snap = get_state_synchronize_rcu();
 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
 }
 
@@ -3217,7 +3220,10 @@ static void kfree_rcu_monitor(struct work_struct *work)
 			// be that the work is in the pending state when
 			// channels have been detached following by each
 			// other.
-			queue_rcu_work(system_wq, &krwp->rcu_work);
+			if (poll_state_synchronize_rcu(krcp->gp_snap))
+				queue_work(system_wq, &krwp->rcu_work.work);
+			else
+				queue_rcu_work(system_wq, &krwp->rcu_work);
 		}
 	}
 
-- 
2.38.1.431.g37b22c650d-goog
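
For readers less familiar with the polled grace-period API, below is a minimal
standalone sketch of the pattern the patch applies to the kfree_rcu() batching
path: take a cookie with get_state_synchronize_rcu() when an object is retired,
and at reclaim time free immediately if poll_state_synchronize_rcu() says a
full grace period has already elapsed, falling back to call_rcu() otherwise.
This is not part of the patch; struct demo_obj, demo_retire(), and
demo_reclaim() are hypothetical names, and the patch itself keys the snapshot
per-krcp when the monitor work is scheduled rather than per-object.

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct rcu_head rh;
		unsigned long gp_snap;	/* cookie from get_state_synchronize_rcu() */
		int payload;
	};

	static void demo_free_cb(struct rcu_head *rh)
	{
		kfree(container_of(rh, struct demo_obj, rh));
	}

	/* Called when the object is logically retired (unlinked from readers). */
	static void demo_retire(struct demo_obj *obj)
	{
		/* Snapshot the current grace-period state. */
		obj->gp_snap = get_state_synchronize_rcu();
	}

	/* Called later, e.g. from deferred batching work. */
	static void demo_reclaim(struct demo_obj *obj)
	{
		if (poll_state_synchronize_rcu(obj->gp_snap))
			kfree(obj);	/* A full GP already elapsed: free now. */
		else
			call_rcu(&obj->rh, demo_free_cb);	/* Otherwise wait for one. */
	}

Under a kfree_rcu()-heavy workload, the monitor typically runs long after the
snapshot was taken, so the poll succeeds and the reclaim work can be queued
directly with queue_work() instead of waiting behind queue_rcu_work(), which
is where the memory-freeing improvement above comes from.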