> monitor_todo is not needed, as the work struct already tracks whether
> work is pending. Just use the delayed_work_pending() helper to check
> for pending work.
>
> Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
> ---
>  kernel/rcu/tree.c | 22 +++++++---------------
>  1 file changed, 7 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 3baf29014f86..3828ac3bf1c4 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3155,7 +3155,6 @@ struct kfree_rcu_cpu_work {
>   * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
>   * @lock: Synchronize access to this structure
>   * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
>   * @initialized: The @rcu_work fields have been initialized
>   * @count: Number of objects for which GP not started
>   * @bkvcache:
> @@ -3180,7 +3179,6 @@ struct kfree_rcu_cpu {
>  	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
>  	raw_spinlock_t lock;
>  	struct delayed_work monitor_work;
> -	bool monitor_todo;
>  	bool initialized;
>  	int count;
>
> @@ -3416,9 +3414,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
>  	// of the channels that is still busy we should rearm the
>  	// work to repeat an attempt. Because previous batches are
>  	// still in progress.
> -	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
> -		krcp->monitor_todo = false;
> -	else
> +	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
>  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
>
>  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
> @@ -3607,10 +3603,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
>
>  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
>  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> -	    !krcp->monitor_todo) {
> -		krcp->monitor_todo = true;
> +	    !delayed_work_pending(&krcp->monitor_work))
>  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> -	}
>
>  unlock_return:
>  	krc_this_cpu_unlock(krcp, flags);
> @@ -3685,14 +3679,12 @@ void __init kfree_rcu_scheduler_running(void)
>  		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
>
>  		raw_spin_lock_irqsave(&krcp->lock, flags);
> -		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
> -		    krcp->monitor_todo) {
> -			raw_spin_unlock_irqrestore(&krcp->lock, flags);
> -			continue;
> +		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) {
> +			if (!delayed_work_pending(&krcp->monitor_work)) {
> +				schedule_delayed_work_on(cpu, &krcp->monitor_work,
> +							 KFREE_DRAIN_JIFFIES);
> +			}
>  		}
> -		krcp->monitor_todo = true;
> -		schedule_delayed_work_on(cpu, &krcp->monitor_work,
> -					 KFREE_DRAIN_JIFFIES);
>  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  	}
>  }
> --

Looks good to me at first glance, but let me know if you would like me
to take a closer look.

--
Uladzislau Rezki
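
For readers unfamiliar with the workqueue API: delayed_work_pending()
tests the PENDING bit of the underlying work_struct, which is set when
the work is queued and cleared just before the callback runs, so it
records exactly what the monitor_todo flag duplicated. Below is a
minimal out-of-tree sketch of the pattern; the demo_* module and its
identifiers are hypothetical illustrations, not part of the patch.

/*
 * demo_pending.c - hypothetical out-of-tree sketch, not part of the
 * patch. Instead of keeping a private "todo" flag next to a
 * delayed_work, ask the work item itself via delayed_work_pending().
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_lock);
static bool demo_have_work;	/* stands in for krcp->bkvhead[]/head */

static void demo_monitor(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_monitor_work, demo_monitor);

static void demo_monitor(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	if (demo_have_work) {
		demo_have_work = false;	/* pretend we drained a batch */
		pr_info("demo: drained pending work\n");
	}
	/*
	 * kfree_rcu_monitor() would rearm itself here if any channel
	 * were still busy; this sketch has nothing left to do.
	 */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_queue_work(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_have_work = true;
	/*
	 * The PENDING bit in the underlying work_struct already says
	 * whether the monitor is armed, so no separate monitor_todo
	 * flag is needed to avoid double-queueing.
	 */
	if (!delayed_work_pending(&demo_monitor_work))
		schedule_delayed_work(&demo_monitor_work, HZ);
	spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
	demo_queue_work();
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_monitor_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("delayed_work_pending() demo");

One behavioral wrinkle worth noting: the PENDING bit is cleared just
before the handler starts executing, so a caller can queue the work
again while the handler is still running. kfree_rcu_monitor() tolerates
this because it re-checks the channels under krcp->lock and rearms
itself if anything is still outstanding.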