Patch "lockdep: fix deadlock issue between lockdep and rcu" has been added to the 5.4-stable tree

This is a note to let you know that I've just added the patch titled

    lockdep: fix deadlock issue between lockdep and rcu

to the 5.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
and it can be found in the queue-5.4 subdirectory.

If you, or anyone else, feel that it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 9b77da44049e49e040887d135333341e5eb18ca4
Author: Zhiguo Niu <zhiguo.niu@xxxxxxxxxx>
Date:   Thu Jun 20 22:54:34 2024 +0000

    lockdep: fix deadlock issue between lockdep and rcu
    
    [ Upstream commit a6f88ac32c6e63e69c595bfae220d8641704c9b7 ]
    
    There is a deadlock scenario between lockdep and rcu when the
    rcu nocb feature is enabled, as shown by the following call stacks:
    
         rcuop/x
    -000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
    -001|queued_spin_lock(inline) // try to hold nocb_gp_lock
    -001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
    -002|__raw_spin_lock_irqsave(inline)
    -002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
    -003|wake_nocb_gp_defer(inline)
    -003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
    -004|__call_rcu_common(inline)
    -004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
    -005|call_rcu_zapped(inline)
    -005|free_zapped_rcu(ch = ?)// hold graph lock
    -006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
    -007|nocb_cb_wait(inline)
    -007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
    -008|kthread(_create = 0xFFFFFF80803122C0)
    -009|ret_from_fork(asm)
    
         rcuop/y
    -000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
    -001|queued_spin_lock()
    -001|lockdep_lock()
    -001|graph_lock() // try to hold graph lock
    -002|lookup_chain_cache_add()
    -002|validate_chain()
    -003|lock_acquire
    -004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
    -005|lock_timer_base(inline)
    -006|mod_timer(inline)
    -006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
    -006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
    -007|__call_rcu_common(inline)
    -007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
    -008|call_rcu_hurry(inline)
    -008|rcu_sync_call(inline)
    -008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
    -009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
    -010|nocb_cb_wait(inline)
    -010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
    -011|kthread(_create = 0xFFFFFF8080363740)
    -012|ret_from_fork(asm)
    
    rcuop/x and rcuop/y are rcu nocb threads sharing the same nocb gp
    thread. rcuop/x holds the lockdep graph lock and tries to take
    ->nocb_gp_lock, while rcuop/y holds ->nocb_gp_lock (taken in
    wake_nocb_gp_defer()) and tries to take the graph lock, so neither
    can make progress. This patch releases the graph lock before lockdep
    calls call_rcu().
    
    Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
    Cc: stable@xxxxxxxxxxxxxxx
    Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
    Cc: Waiman Long <longman@xxxxxxxxxx>
    Cc: Carlos Llamas <cmllamas@xxxxxxxxxx>
    Cc: Bart Van Assche <bvanassche@xxxxxxx>
    Signed-off-by: Zhiguo Niu <zhiguo.niu@xxxxxxxxxx>
    Signed-off-by: Xuewen Yan <xuewen.yan@xxxxxxxxxx>
    Reviewed-by: Waiman Long <longman@xxxxxxxxxx>
    Reviewed-by: Carlos Llamas <cmllamas@xxxxxxxxxx>
    Reviewed-by: Bart Van Assche <bvanassche@xxxxxxx>
    Signed-off-by: Carlos Llamas <cmllamas@xxxxxxxxxx>
    Acked-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
    Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
    Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@xxxxxxxxxx
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
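
The fix, visible in the diff below, splits the old call_rcu_zapped() into a
decision made under the lock and a call_rcu() issued after it. A condensed
sketch of that pattern, using the names from the diff (the wrapper function
itself is hypothetical, standing in for the several call sites the patch
converts):

static void zap_and_free_example(void)
{
	struct pending_free *pf;
	unsigned long flags;
	bool need_callback;

	raw_local_irq_save(flags);
	lockdep_lock();				/* graph lock held */
	pf = get_pending_free();
	/* ... move zapped classes onto pf ... */
	need_callback = prepare_call_rcu_zapped(pf);	/* decide only */
	lockdep_unlock();			/* graph lock dropped */
	raw_local_irq_restore(flags);

	/*
	 * call_rcu() may take ->nocb_gp_lock via __call_rcu_nocb_wake()
	 * (see the rcuop/x stack above), so it must run only after the
	 * graph lock has been released.
	 */
	if (need_callback)
		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}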

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8e0351970a1da..7cc790a262def 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5054,25 +5054,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback; must be called with
+ * the lockdep lock held. Returns false if either there is no
+ * pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return;
+		return false;
 
 	if (delayed_free.scheduled)
-		return;
+		return false;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -5098,6 +5100,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -5109,14 +5112,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	lockdep_unlock();
+	raw_local_irq_restore(flags);
 
 	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	 * If there's a pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 }
 
 /*
@@ -5156,6 +5163,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	init_data_structures_once();
 
@@ -5163,10 +5171,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -5260,6 +5269,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -5268,11 +5278,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -5316,6 +5328,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;
 
 	might_sleep();
 
@@ -5336,11 +5349,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
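
One subtlety worth noting: issuing call_rcu() only after the unlock stays
race-free because delayed_free.scheduled is tested and set while the lockdep
lock is held, so at most one caller can see prepare_call_rcu_zapped() return
true until free_zapped_rcu() clears the flag again. An assumed interleaving
of two concurrent zappers illustrating this:

/*
 * Assumed interleaving; only CPU 0 ends up queueing the callback.
 *
 *   CPU 0                                 CPU 1
 *   -----                                 -----
 *   lockdep_lock();
 *   prepare_call_rcu_zapped(pf)
 *     sets delayed_free.scheduled,
 *     returns true
 *   lockdep_unlock();
 *                                         lockdep_lock();
 *                                         prepare_call_rcu_zapped(pf)
 *                                           scheduled already true,
 *                                           returns false
 *                                         lockdep_unlock();
 *   call_rcu(&delayed_free.rcu_head,
 *            free_zapped_rcu);
 */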



