The following commit has been merged into the core/rcu branch of tip: Commit-ID: 5390473ec1697b71af0e9d63ef7aaa7ecd27e2c9 Gitweb: https://git.kernel.org/tip/5390473ec1697b71af0e9d63ef7aaa7ecd27e2c9 Author: Paul E. McKenney <paulmck@xxxxxxxxxx> AuthorDate: Thu, 15 Apr 2021 16:30:34 -07:00 Committer: Paul E. McKenney <paulmck@xxxxxxxxxx> CommitterDate: Mon, 10 May 2021 16:44:11 -07:00 rcu: Don't penalize priority boosting when there is nothing to boost RCU priority boosting cannot do anything unless there is at least one task blocking the current RCU grace period that was preempted within the RCU read-side critical section that it still resides in. However, the current rcu_torture_boost_failed() code will count this as an RCU priority-boosting failure if there were no CPUs blocking the current grace period. This situation can happen (for example) if the last CPU blocking the current grace period was subjected to vCPU preemption, which is always a risk for rcutorture guest OSes. This commit therefore causes rcu_torture_boost_failed() to refrain from reporting failure unless there is at least one task blocking the current RCU grace period that was preempted within the RCU read-side critical section that it still resides in. Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx> --- kernel/rcu/tree_stall.h | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 8bde1b5..6530251 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -723,6 +723,10 @@ static void check_cpu_stall(struct rcu_data *rdp) * count this as an RCU priority boosting failure. A return of true says * RCU priority boosting is to blame, and false says otherwise. If false * is returned, the first of the CPUs to blame is stored through cpup. + * If there was no CPU blocking the current grace period, but also nothing + * in need of being boosted, *cpup is set to -1. 
This can happen in case + * of vCPU preemption while the last CPU is reporting its quiescent state, + * for example. * * If cpup is NULL, then a lockless quick check is carried out, suitable * for high-rate usage. On the other hand, if cpup is non-NULL, each @@ -730,18 +734,25 @@ static void check_cpu_stall(struct rcu_data *rdp) */ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { + bool atb = false; int cpu; unsigned long flags; struct rcu_node *rnp; rcu_for_each_leaf_node(rnp) { if (!cpup) { - if (READ_ONCE(rnp->qsmask)) + if (READ_ONCE(rnp->qsmask)) { return false; - else + } else { + if (READ_ONCE(rnp->gp_tasks)) + atb = true; continue; + } } + *cpup = -1; raw_spin_lock_irqsave_rcu_node(rnp, flags); + if (rnp->gp_tasks) + atb = true; if (!rnp->qsmask) { // No CPUs without quiescent states for this rnp. raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -758,7 +769,7 @@ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } // Can't blame CPUs, so must blame RCU priority boosting. - return true; + return atb; } EXPORT_SYMBOL_GPL(rcu_check_boost_fail);