[PATCH 2/3] rcu: Use consistent locking around kfree_rcu_drain_unlock()

Remove the conditional unlocking from kfree_rcu_drain_unlock(). Despite
the _unlock() suffix there is no need for the function to drop the lock
itself: both callers can do the locking and unlocking, keeping each
lock/unlock pair balanced within the same function.
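
For illustration, the caller-side pattern before and after (a minimal
sketch distilled from the diff below, not additional code):

	// Before: the callee conditionally drops the caller's lock.
	spin_lock_irqsave(&krcp->lock, flags);
	if (krcp->monitor_todo)
		kfree_rcu_drain_unlock(krcp, flags);	// unlocks internally
	else
		spin_unlock_irqrestore(&krcp->lock, flags);

	// After: lock and unlock are paired in the same function.
	spin_lock_irq(&krcp->lock);
	if (krcp->monitor_todo)
		kfree_rcu_drain(krcp);			// lock held throughout
	spin_unlock_irq(&krcp->lock);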

There is also no need for _irqsave(): the worker and the shrinker are
always invoked with interrupts enabled, so the plain _irq() suffix is
sufficient.
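
As a minimal sketch of the difference between the two suffixes:
_irqsave() saves the caller's interrupt state and restores exactly that
state on unlock, while _irq() unconditionally disables and re-enables
interrupts and is therefore only correct when interrupts are known to
be enabled on entry:

	unsigned long flags;

	// Safe in any context: saves the current interrupt state in
	// 'flags' and restores that same state on unlock.
	spin_lock_irqsave(&krcp->lock, flags);
	spin_unlock_irqrestore(&krcp->lock, flags);

	// Only correct with interrupts enabled on entry: disables
	// interrupts on lock and unconditionally re-enables them on
	// unlock, as is the case in the worker and the shrinker here.
	spin_lock_irq(&krcp->lock);
	spin_unlock_irq(&krcp->lock);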

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
 kernel/rcu/tree.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5b0b63dd04b02..b31b61721a9ff 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3040,38 +3040,32 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
 	return queued;
 }
 
-static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
-					  unsigned long flags)
+static void kfree_rcu_drain(struct kfree_rcu_cpu *krcp)
 {
 	// Attempt to start a new batch.
 	krcp->monitor_todo = false;
-	if (queue_kfree_rcu_work(krcp)) {
+	if (queue_kfree_rcu_work(krcp))
 		// Success! Our job is done here.
-		spin_unlock_irqrestore(&krcp->lock, flags);
 		return;
-	}
 
 	// Previous RCU batch still in progress, try again later.
 	krcp->monitor_todo = true;
 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
- * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
+ * It invokes kfree_rcu_drain() to attempt to start another batch.
  */
 static void kfree_rcu_monitor(struct work_struct *work)
 {
-	unsigned long flags;
 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
 						 monitor_work.work);
 
-	spin_lock_irqsave(&krcp->lock, flags);
+	spin_lock_irq(&krcp->lock);
 	if (krcp->monitor_todo)
-		kfree_rcu_drain_unlock(krcp, flags);
-	else
-		spin_unlock_irqrestore(&krcp->lock, flags);
+		kfree_rcu_drain(krcp);
+	spin_unlock_irq(&krcp->lock);
 }
 
 static inline bool
@@ -3191,18 +3185,16 @@ static unsigned long
 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	int cpu, freed = 0;
-	unsigned long flags;
 
 	for_each_online_cpu(cpu) {
 		int count;
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count = krcp->count;
-		spin_lock_irqsave(&krcp->lock, flags);
+		spin_lock_irq(&krcp->lock);
 		if (krcp->monitor_todo)
-			kfree_rcu_drain_unlock(krcp, flags);
-		else
-			spin_unlock_irqrestore(&krcp->lock, flags);
+			kfree_rcu_drain(krcp);
+		spin_unlock_irq(&krcp->lock);
 
 		sc->nr_to_scan -= count;
 		freed += count;
-- 
2.26.0




