On Wed, 2024-10-23 at 13:37 +1100, NeilBrown wrote:
> New fields sp_nractual and sv_nractual track how many actual threads are
> running. sp_nrthreads and sv_nrthreads will be the number that were
> explicitly requested. Currently nractual == nrthreads.
>
> sv_nractual is used for sizing UDP incoming socket space - in the rare
> case that UDP is used. This is because each thread might need to keep a
> request in the skbs.
>
> Signed-off-by: NeilBrown <neilb@xxxxxxx>
> ---
>  include/linux/sunrpc/svc.h |  6 ++++--
>  net/sunrpc/svc.c           | 22 +++++++++++++++-------
>  net/sunrpc/svcsock.c       |  2 +-
>  3 files changed, 20 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index 9d288a673705..3f2c90061b4a 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -36,7 +36,8 @@
>  struct svc_pool {
>  	unsigned int		sp_id;		/* pool id; also node id on NUMA */
>  	struct lwq		sp_xprts;	/* pending transports */
> -	unsigned int		sp_nrthreads;	/* # of threads in pool */
> +	unsigned int		sp_nrthreads;	/* # of threads requested for pool */
> +	unsigned int		sp_nractual;	/* # of threads running */
>  	struct list_head	sp_all_threads;	/* all server threads */
>  	struct llist_head	sp_idle_threads; /* idle server threads */
>
> @@ -71,7 +72,8 @@ struct svc_serv {
>  	struct svc_stat *	sv_stats;	/* RPC statistics */
>  	spinlock_t		sv_lock;
>  	unsigned int		sv_nprogs;	/* Number of sv_programs */
> -	unsigned int		sv_nrthreads;	/* # of server threads */
> +	unsigned int		sv_nrthreads;	/* # of server threads requested*/
> +	unsigned int		sv_nractual;	/* # of running threads */
>  	unsigned int		sv_max_payload;	/* datagram payload size */
>  	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
>  	unsigned int		sv_xdrsize;	/* XDR buffer size */
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index bd4f02b34f44..d332f9d3d875 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -781,8 +781,12 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
>  	}
>
>  	if (pool && pool->sp_nrthreads) {
> -		set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
> -		set_bit(SP_NEED_VICTIM, &pool->sp_flags);
> +		if (pool->sp_nrthreads <= pool->sp_nractual) {
> +			set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
> +			set_bit(SP_NEED_VICTIM, &pool->sp_flags);
> +			pool->sp_nractual -= 1;
> +			serv->sv_nractual -= 1;
> +		}

This is a little strange. It decrements the thread counts before the
threads actually exit. So, these counts are really just aspirational at
this point. Not a bug, just a note to myself while I am going over
this.
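To illustrate the window I mean, here's a rough userspace analogue
(purely illustrative -- plain pthreads, none of this is kernel code,
and the names just mirror the patch): the "manager" drops the count as
soon as it picks a victim, and only the exiting thread clears the
"remains" flag later.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int nractual;		/* threads we claim are running */
static atomic_bool need_victim;		/* stand-in for SP_NEED_VICTIM */
static atomic_bool victim_remains;	/* stand-in for SP_VICTIM_REMAINS */

static void *server_thread(void *arg)
{
	(void)arg;
	/* "service requests" until told to be the victim */
	while (!atomic_exchange(&need_victim, false))
		usleep(1000);
	/* analogue of svc_exit_thread() clearing SP_VICTIM_REMAINS */
	atomic_store(&victim_remains, false);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&nractual, 1);
	pthread_create(&t, NULL, server_thread, NULL);

	/* svc_pool_victim() analogue: account first, reap later */
	atomic_store(&victim_remains, true);
	atomic_fetch_sub(&nractual, 1);		/* count drops here... */
	printf("after selection: nractual=%d victim_remains=%d\n",
	       atomic_load(&nractual), (int)atomic_load(&victim_remains));
	atomic_store(&need_victim, true);	/* ...thread exits later */

	/* svc_stop_kthreads() analogue: wait for the victim to go away */
	while (atomic_load(&victim_remains))
		usleep(1000);
	pthread_join(t, NULL);
	printf("victim gone; count finally matches reality\n");
	return 0;
}

(Build with -pthread.) The first printf fires in exactly the window
where the count says one thing and reality says another.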
>  		return pool;
>  	}
>  	return NULL;
> @@ -803,6 +807,12 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
>  		chosen_pool = svc_pool_next(serv, pool, &state);
>  		node = svc_pool_map_get_node(chosen_pool->sp_id);
>
> +		serv->sv_nrthreads += 1;
> +		chosen_pool->sp_nrthreads += 1;
> +
> +		if (chosen_pool->sp_nrthreads <= chosen_pool->sp_nractual)
> +			continue;
> +
>  		rqstp = svc_prepare_thread(serv, chosen_pool, node);
>  		if (!rqstp)
>  			return -ENOMEM;
> @@ -812,8 +822,8 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
>  			svc_exit_thread(rqstp);
>  			return PTR_ERR(task);
>  		}
> -		serv->sv_nrthreads += 1;
> -		chosen_pool->sp_nrthreads += 1;
> +		serv->sv_nractual += 1;
> +		chosen_pool->sp_nractual += 1;
>
>  		rqstp->rq_task = task;
>  		if (serv->sv_nrpools > 1)
> @@ -850,6 +860,7 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
> 					 TASK_IDLE);
>  		nrservs++;
>  	} while (nrservs < 0);
> +	svc_sock_update_bufs(serv);

Nice little change here. No need to do this on every thread exit --
just do it once when they're all done. This change probably should be
split into a separate patch.

>  	return 0;
>  }
>
> @@ -955,13 +966,10 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp)
>  void
>  svc_exit_thread(struct svc_rqst *rqstp)
>  {
> -	struct svc_serv	*serv = rqstp->rq_server;
>  	struct svc_pool	*pool = rqstp->rq_pool;
>
>  	list_del_rcu(&rqstp->rq_all);
>
> -	svc_sock_update_bufs(serv);
> -
>  	svc_rqst_free(rqstp);
>
>  	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
> diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
> index 825ec5357691..191dbc648bd0 100644
> --- a/net/sunrpc/svcsock.c
> +++ b/net/sunrpc/svcsock.c
> @@ -588,7 +588,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
>  	 * provides an upper bound on the number of threads
>  	 * which will access the socket.
>  	 */
> -	svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
> +	svc_sock_setbufsize(svsk, serv->sv_nractual + 3);
>
>  	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
>  	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,

Other than maybe splitting that one change into a separate patch, this
looks good to me.

Reviewed-by: Jeff Layton <jlayton@xxxxxxxxxx>
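P.S. In case it helps anyone reading along, a quick back-of-the-envelope
for the svcsock.c hunk (illustrative only; the thread count and sizes
here are made-up numbers, though the +3 slack and the "max_payload plus
one page" shape of sv_max_mesg come from the code above). The point is
that each running thread can hold one request's worth of skbs, so the
wanted UDP receive space should scale with threads actually running,
not threads requested:

#include <stdio.h>

int main(void)
{
	unsigned int sv_nractual = 8;			/* threads actually running (hypothetical) */
	unsigned int sv_max_mesg = 65536 + 4096;	/* max_payload + 1 page for overheads */
	unsigned int nreqs = sv_nractual + 3;		/* the "+ 3" from svc_udp_recvfrom() */

	printf("want room for %u messages: ~%u bytes of rcv space\n",
	       nreqs, nreqs * sv_max_mesg);
	return 0;
}

With sv_nrthreads possibly far above sv_nractual after this series,
sizing from the actual count avoids asking for socket buffer space no
thread will ever use.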