On Thu, 2023-06-29 at 14:42 -0400, Chuck Lever wrote:
> From: Chuck Lever <chuck.lever@xxxxxxxxxx>
> 
> Refactor: Extract the loop that finds an idle service thread from
> svc_xprt_enqueue() and svc_wake_up().
> 
> Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
> ---
>  include/linux/sunrpc/svc.h |    1 +
>  net/sunrpc/svc.c           |   28 +++++++++++++++++++++++++++
>  net/sunrpc/svc_xprt.c      |   46 +++++++++++++-------------------------------
>  3 files changed, 43 insertions(+), 32 deletions(-)
> 
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index f8751118c122..dc2d90a655e2 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -427,6 +427,7 @@ int svc_register(const struct svc_serv *, struct net *, const int,
>  
>  void		   svc_wake_up(struct svc_serv *);
>  void		   svc_reserve(struct svc_rqst *rqstp, int space);
> +struct svc_rqst	  *svc_pool_wake_idle_thread(struct svc_pool *pool);
>  struct svc_pool   *svc_pool_for_cpu(struct svc_serv *serv);
>  char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
>  const char *	   svc_proc_name(const struct svc_rqst *rqstp);
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index 587811a002c9..e81ce5f76abd 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -689,6 +689,34 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
>  	return rqstp;
>  }
>  
> +/**
> + * svc_pool_wake_idle_thread - wake an idle thread in @pool
> + * @pool: service thread pool
> + *
> + * Returns an idle service thread (now marked BUSY), or NULL
> + * if no service threads are available. Finding an idle service
> + * thread and marking it BUSY is atomic with respect to other
> + * calls to svc_pool_wake_idle_thread().
> + */
> +struct svc_rqst *svc_pool_wake_idle_thread(struct svc_pool *pool)
> +{
> +	struct svc_rqst *rqstp;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
> +		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
> +			continue;
> +
> +		rcu_read_unlock();
> +		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
> +		wake_up_process(rqstp->rq_task);
> +		percpu_counter_inc(&pool->sp_threads_woken);
> +		return rqstp;
> +	}
> +	rcu_read_unlock();
> +	return NULL;
> +}
> +
>  /*
>   * Choose a pool in which to create a new thread, for svc_set_num_threads
>   */
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index 62c7919ea610..f14476d11b67 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -455,8 +455,8 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
>   */
>  void svc_xprt_enqueue(struct svc_xprt *xprt)
>  {
> +	struct svc_rqst *rqstp;
>  	struct svc_pool *pool;
> -	struct svc_rqst *rqstp = NULL;
>  
>  	if (!svc_xprt_ready(xprt))
>  		return;
> @@ -476,20 +476,10 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
>  	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
>  	spin_unlock_bh(&pool->sp_lock);
>  
> -	/* find a thread for this xprt */
> -	rcu_read_lock();
> -	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
> -		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
> -			continue;
> -		percpu_counter_inc(&pool->sp_threads_woken);
> -		rqstp->rq_qtime = ktime_get();
> -		wake_up_process(rqstp->rq_task);
> -		goto out_unlock;
> -	}
> -	set_bit(SP_CONGESTED, &pool->sp_flags);
> -	rqstp = NULL;
> -out_unlock:
> -	rcu_read_unlock();
> +	rqstp = svc_pool_wake_idle_thread(pool);
> +	if (!rqstp)
> +		set_bit(SP_CONGESTED, &pool->sp_flags);
> +
>  	trace_svc_xprt_enqueue(xprt, rqstp);
>  }
>  EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
> @@ -581,7 +571,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
>  	svc_xprt_put(xprt);
>  }
>  
> -/*
> +/**
> + * svc_wake_up - Wake up a service thread for non-transport work
> + * @serv: RPC service
> + *
>   * Some svc_serv's will have occasional work to do, even when a xprt is not
>   * waiting to be serviced. This function is there to "kick" a task in one of
>   * those services so that it can wake up and do that work. Note that we only
> @@ -590,27 +583,16 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
>   */
>  void svc_wake_up(struct svc_serv *serv)
>  {
> +	struct svc_pool *pool = &serv->sv_pools[0];
>  	struct svc_rqst *rqstp;
> -	struct svc_pool *pool;
>  
> -	pool = &serv->sv_pools[0];
> -
> -	rcu_read_lock();
> -	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
> -		/* skip any that aren't queued */
> -		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
> -			continue;
> -		rcu_read_unlock();
> -		wake_up_process(rqstp->rq_task);
> -		trace_svc_wake_up(rqstp->rq_task->pid);
> +	rqstp = svc_pool_wake_idle_thread(pool);
> +	if (!rqstp) {
> +		set_bit(SP_TASK_PENDING, &pool->sp_flags);

nit: it might be good to add this here, for better conformance with the
old tracepoint behavior:

	trace_svc_wake_up(0);

>  		return;
>  	}
> -	rcu_read_unlock();
>  
> -	/* No free entries available */
> -	set_bit(SP_TASK_PENDING, &pool->sp_flags);
> -	smp_wmb();

I assume this wmb was for the set_bit above? Do we need that in the
!rqstp case?

> -	trace_svc_wake_up(0);
> +	trace_svc_wake_up(rqstp->rq_task->pid);
>  }
>  EXPORT_SYMBOL_GPL(svc_wake_up);
> 
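
To make the nit above concrete, the !rqstp branch could look something
like this (just an untested sketch on top of this patch):

	rqstp = svc_pool_wake_idle_thread(pool);
	if (!rqstp) {
		set_bit(SP_TASK_PENDING, &pool->sp_flags);
		/* keep emitting the old "no idle thread found" event */
		trace_svc_wake_up(0);
		return;
	}

	trace_svc_wake_up(rqstp->rq_task->pid);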
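On the wmb question: if the barrier was there to make the
SP_TASK_PENDING set_bit() visible before a service thread re-checks
sp_flags on its way to sleep, then only the !rqstp path would still
want it, since that's the only path that still sets the flag. Roughly
(again just a sketch; I haven't traced what reads SP_TASK_PENDING on
the other side, so whether any barrier is needed here at all is an
open question):

	if (!rqstp) {
		set_bit(SP_TASK_PENDING, &pool->sp_flags);
		/* assumed pairing: publish the flag before a thread
		 * that found no work re-checks sp_flags */
		smp_wmb();
		trace_svc_wake_up(0);
		return;
	}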