On Thu, Sep 29, 2022 at 11:07:25AM -0700, Paul E. McKenney wrote:
> @@ -1090,7 +1121,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
>  	int ss_state;
>  
>  	check_init_srcu_struct(ssp);
> -	idx = srcu_read_lock(ssp);
> +	idx = __srcu_read_lock_nmisafe(ssp);

Why do we need to force the atomic-based version here (even if
CONFIG_NEED_SRCU_NMI_SAFE=y)?

>  	ss_state = smp_load_acquire(&ssp->srcu_size_state);
>  	if (ss_state < SRCU_SIZE_WAIT_CALL)
>  		sdp = per_cpu_ptr(ssp->sda, 0);
> @@ -1123,7 +1154,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
>  		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
>  	else if (needexp)
>  		srcu_funnel_exp_start(ssp, sdp_mynode, s);
> -	srcu_read_unlock(ssp, idx);
> +	__srcu_read_unlock_nmisafe(ssp, idx);
>  	return s;
>  }
>  
> @@ -1427,13 +1458,13 @@ void srcu_barrier(struct srcu_struct *ssp)
>  	/* Initial count prevents reaching zero until all CBs are posted. */
>  	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
>  
> -	idx = srcu_read_lock(ssp);
> +	idx = __srcu_read_lock_nmisafe(ssp);

And same here?

Thanks.

>  	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
>  		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
>  	else
>  		for_each_possible_cpu(cpu)
>  			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
> -	srcu_read_unlock(ssp, idx);
> +	__srcu_read_unlock_nmisafe(ssp, idx);
>  
>  	/* Remove the initial count, at which point reaching zero can happen. */
>  	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
> -- 
> 2.31.1.189.g2e36527f23
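
(For anyone following along, below is a minimal userspace sketch of my
mental model of the distinction being asked about -- emphatically not
the kernel code, and the "model_*"/"sdp_model" names are made up for
illustration.  As I read the series, srcu_read_lock() bumps the per-CPU
reader count with this_cpu_inc(), which on architectures without
NMI-safe per-CPU operations is a plain load/add/store that an NMI can
land in the middle of, whereas __srcu_read_lock_nmisafe() does a single
atomic increment, NMI-safe everywhere but slower on the fast path.)

#include <stdatomic.h>
#include <stdio.h>

/* Models one CPU's srcu_data reader counters, one flavor per array. */
struct sdp_model {
	unsigned long lock_count[2];		/* this_cpu_inc() flavor */
	atomic_ulong nmisafe_lock_count[2];	/* atomic_long_inc() flavor */
};

static struct sdp_model sdp;
static int srcu_idx;	/* models READ_ONCE(ssp->srcu_idx) & 0x1 */

/* Model of srcu_read_lock(): a plain read-modify-write.  An NMI
 * arriving between the load and the store would run a reader whose
 * own increment is then overwritten, i.e. a lost update. */
static int model_read_lock(void)
{
	int idx = srcu_idx & 0x1;

	sdp.lock_count[idx]++;
	return idx;
}

/* Model of __srcu_read_lock_nmisafe(): one indivisible atomic RMW,
 * safe even if an NMI handler reenters this path on the same CPU. */
static int model_read_lock_nmisafe(void)
{
	int idx = srcu_idx & 0x1;

	atomic_fetch_add(&sdp.nmisafe_lock_count[idx], 1);
	return idx;
}

int main(void)
{
	int i1 = model_read_lock();
	int i2 = model_read_lock_nmisafe();

	printf("plain[%d]=%lu atomic[%d]=%lu\n",
	       i1, sdp.lock_count[i1],
	       i2, (unsigned long)atomic_load(&sdp.nmisafe_lock_count[i2]));
	return 0;
}

(Builds with "cc -std=c11"; the question above is why the forced-atomic
flavor is used on these internal paths, which never run in NMI context.)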