This is a note to let you know that I've just added the patch titled

    rcu-tasks: Simplify trc_read_check_handler() atomic operations

to the 5.10-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     rcu-tasks-simplify-trc_read_check_handler-atomic-operations.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From stable-owner@xxxxxxxxxxxxxxx Sat Jul 15 02:47:26 2023
From: "Joel Fernandes (Google)" <joel@xxxxxxxxxxxxxxxxx>
Date: Sat, 15 Jul 2023 00:47:11 +0000
Subject: rcu-tasks: Simplify trc_read_check_handler() atomic operations
To: stable@xxxxxxxxxxxxxxx
Cc: "Joel Fernandes (Google)" <joel@xxxxxxxxxxxxxxxxx>, "Paul E. McKenney" <paulmck@xxxxxxxxxx>
Message-ID: <20230715004711.2938489-4-joel@xxxxxxxxxxxxxxxxx>

From: "Paul E. McKenney" <paulmck@xxxxxxxxxx>

[ Upstream commit 96017bf9039763a2e02dcc6adaa18592cd73a39d ]

Currently, trc_wait_for_one_reader() atomically increments
the trc_n_readers_need_end counter before sending the IPI
invoking trc_read_check_handler().  All failure paths out of
trc_read_check_handler() and also from the smp_call_function_single()
within trc_wait_for_one_reader() must carefully atomically decrement
this counter.  This is more complex than it needs to be.

This commit therefore simplifies things and saves a few lines of code
by dispensing with the atomic decrements in favor of having
trc_read_check_handler() do the atomic increment only in the success
case.  In theory, this represents no change in functionality.

Cc: <stable@xxxxxxxxxxxxxxx> # 5.10.x
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 kernel/rcu/tasks.h |   20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -841,32 +841,24 @@ static void trc_read_check_handler(void
 	// If the task is no longer running on this CPU, leave.
 	if (unlikely(texp != t)) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
 		goto reset_ipi; // Already on holdout list, so will check later.
 	}
 
 	// If the task is not in a read-side critical section, and
 	// if this is the last reader, awaken the grace-period kthread.
 	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
-		// Mark as checked after decrement to avoid false
-		// positives on the above WARN_ON_ONCE().
 		WRITE_ONCE(t->trc_reader_checked, true);
 		goto reset_ipi;
 	}
 	// If we are racing with an rcu_read_unlock_trace(), try again later.
-	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
+	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
 		goto reset_ipi;
-	}
 	WRITE_ONCE(t->trc_reader_checked, true);
 
 	// Get here if the task is in a read-side critical section.  Set
 	// its state so that it will awaken the grace-period kthread upon
 	// exit from that critical section.
+	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
@@ -960,21 +952,15 @@ static void trc_wait_for_one_reader(stru
 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
 			return;
 
-		atomic_inc(&trc_n_readers_need_end);
 		per_cpu(trc_ipi_to_cpu, cpu) = true;
 		t->trc_ipi_to_cpu = cpu;
 		rcu_tasks_trace.n_ipis++;
-		if (smp_call_function_single(cpu,
-					     trc_read_check_handler, t, 0)) {
+		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
 			// Just in case there is some other reason for
 			// failure than the target CPU being offline.
 			rcu_tasks_trace.n_ipis_fails++;
 			per_cpu(trc_ipi_to_cpu, cpu) = false;
 			t->trc_ipi_to_cpu = cpu;
-			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
-				WARN_ON_ONCE(1);
-				wake_up(&trc_wait);
-			}
 		}
 	}
 }


Patches currently in stable-queue which might be from stable-owner@xxxxxxxxxxxxxxx are

queue-5.10/rcu-tasks-simplify-trc_read_check_handler-atomic-operations.patch
queue-5.10/netfilter-nf_tables-fix-chain-binding-transaction-logic.patch
queue-5.10/netfilter-nf_tables-drop-map-element-references-from-preparation-phase.patch
queue-5.10/netfilter-nf_tables-fix-scheduling-while-atomic-splat.patch
queue-5.10/netfilter-nf_tables-reject-unbound-anonymous-set-before-commit-phase.patch
queue-5.10/netfilter-nf_tables-use-net_generic-infra-for-transaction-data.patch
queue-5.10/netfilter-nftables-rename-set-element-data-activation-deactivation-functions.patch
queue-5.10/netfilter-nf_tables-incorrect-error-path-handling-with-nft_msg_newrule.patch
queue-5.10/rcu-tasks-mark-trc_reader_nesting-data-races.patch
queue-5.10/netfilter-nf_tables-reject-unbound-chain-set-before-commit-phase.patch
queue-5.10/rcu-tasks-mark-trc_reader_special.b.need_qs-data-races.patch
queue-5.10/netfilter-nf_tables-add-rescheduling-points-during-loop-detection-walks.patch
queue-5.10/netfilter-nf_tables-add-nft_trans_prepare_error-to-deal-with-bound-set-chain.patch
queue-5.10/netfilter-nf_tables-unbind-non-anonymous-set-if-rule-construction-fails.patch
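
For reference, the bookkeeping change the commit message describes can be
illustrated with a minimal userspace sketch.  This is not the kernel code:
the IPI is modeled as a direct function call, C11 atomics stand in for the
kernel's atomic_t, the task struct is reduced to two fields, and the initial
counter value of 1 stands in for the updater's own reference so that the
count cannot reach zero before all readers have been checked.  The point is
that only the success path touches the counter, so none of the failure
paths need a carefully paired decrement:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* One reference held by the updater itself (assumed convention). */
	static atomic_int trc_n_readers_need_end = 1;

	struct task {
		int reader_nesting;	/* > 0: inside a read-side critical section */
		bool need_qs;		/* must report quiescent state on exit */
	};

	/* Models trc_read_check_handler(): increment only in the success case. */
	static void read_check_handler(struct task *t)
	{
		if (t->reader_nesting <= 0)
			return;	/* failure/fast paths: nothing to undo */
		atomic_fetch_add(&trc_n_readers_need_end, 1); /* one more to wait on */
		t->need_qs = true;
	}

	/* Models the reader leaving its critical section. */
	static void reader_exit(struct task *t)
	{
		t->reader_nesting = 0;
		if (t->need_qs && atomic_fetch_sub(&trc_n_readers_need_end, 1) == 1)
			printf("last reader done: wake the grace-period kthread\n");
	}

	int main(void)
	{
		struct task idle = { .reader_nesting = 0 };
		struct task busy = { .reader_nesting = 1 };

		read_check_handler(&idle);	/* no-op, counter untouched */
		read_check_handler(&busy);	/* counter: 1 -> 2 */

		atomic_fetch_sub(&trc_n_readers_need_end, 1); /* updater drops its reference: 2 -> 1 */
		reader_exit(&busy);	/* 1 -> 0, wakeup fires */
		return 0;
	}

In the old scheme, the idle-task call above would have had to undo a
pre-increment done by the caller; here it simply returns.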