This consolidates the code a bit and makes it cleaner. Functionally it
is the same.

Reported-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
---
 kernel/rcu/tree_nocb.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index e852c060d4e3..5ce66f9f4a98 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -327,10 +327,11 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
  *
  * Note that this function always returns true if rhp is NULL.
  */
-static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
 				     unsigned long j, bool lazy)
 {
 	struct rcu_cblist rcl;
+	struct rcu_head *rhp = rhp_in;
 
 	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
 	rcu_lockdep_assert_cblist_protected(rdp);
@@ -345,16 +346,16 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 
 	/*
 	 * If the new CB requested was a lazy one, queue it onto the main
-	 * ->cblist so we can take advantage of a sooner grade period.
+	 * ->cblist so that we can take advantage of the grace-period that will
+	 * happen regardless. But queue it onto the bypass list first so that
+	 * the lazy CB is ordered with the existing CBs in the bypass list.
 	 */
 	if (lazy && rhp) {
-		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, NULL);
-		rcu_cblist_enqueue(&rcl, rhp);
-		WRITE_ONCE(rdp->lazy_len, 0);
-	} else {
-		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
-		WRITE_ONCE(rdp->lazy_len, 0);
+		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
+		rhp = NULL;
 	}
+	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
+	WRITE_ONCE(rdp->lazy_len, 0);
 
 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
 	WRITE_ONCE(rdp->nocb_bypass_first, j);
-- 
2.38.0.rc1.362.ged0d419d3c-goog
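
For reference, here is a sketch of how the flush path in
rcu_nocb_do_flush_bypass() reads once the hunks above are applied. It is
reconstructed from the diff, not a verbatim copy of the resulting file;
lines this patch does not touch are elided with "...":

static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;	/* local copy so it can be NULLed below */

	...

	/*
	 * A lazy CB is queued onto the bypass list first, so it stays
	 * ordered behind the CBs already there; clearing rhp then lets
	 * the single flush_enqueue below serve both the lazy and
	 * non-lazy cases.
	 */
	if (lazy && rhp) {
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;
	}
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	...
}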