On Sun, 2023-07-09 at 11:45 -0400, Chuck Lever wrote: > From: Chuck Lever <chuck.lever@xxxxxxxxxx> > > To reduce contention on the bucket locks, we must avoid calling > kfree() while each bucket lock is held. > > Start by refactoring nfsd_reply_cache_free_locked() into a helper > that removes an entry from the bucket (and must therefore run under > the lock) and a second helper that frees the entry (which does not > need to hold the lock). > > For readability, rename the helpers nfsd_cacherep_<verb>. > > Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx> > --- > fs/nfsd/nfscache.c | 26 +++++++++++++++++++------- > 1 file changed, 19 insertions(+), 7 deletions(-) > > diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c > index a8eda1c85829..601298b7f75f 100644 > --- a/fs/nfsd/nfscache.c > +++ b/fs/nfsd/nfscache.c > @@ -110,21 +110,32 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, > return rp; > } > > +static void nfsd_cacherep_free(struct svc_cacherep *rp) > +{ > + kfree(rp->c_replvec.iov_base); > + kmem_cache_free(drc_slab, rp); > +} > + > static void > -nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, > - struct nfsd_net *nn) > +nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b, > + struct svc_cacherep *rp) > { > - if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { > + if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) > nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len); > - kfree(rp->c_replvec.iov_base); > - } > if (rp->c_state != RC_UNUSED) { > rb_erase(&rp->c_node, &b->rb_head); > list_del(&rp->c_lru); > atomic_dec(&nn->num_drc_entries); > nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp)); > } > - kmem_cache_free(drc_slab, rp); > +} > + > +static void > +nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, > + struct nfsd_net *nn) > +{ > + nfsd_cacherep_unlink_locked(nn, b, rp); > + nfsd_cacherep_free(rp); > } > > static void > @@ -132,8 +143,9 
@@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, > struct nfsd_net *nn) > { > spin_lock(&b->cache_lock); > - nfsd_reply_cache_free_locked(b, rp, nn); > + nfsd_cacherep_unlink_locked(nn, b, rp); > spin_unlock(&b->cache_lock); > + nfsd_cacherep_free(rp); > } > > int nfsd_drc_slab_create(void) > > Seems straightforward. I do question whether this will make any difference for performance, but it's unlikely to hurt anything, and it's nice to separate the "unlink" and "free" functions. Reviewed-by: Jeff Layton <jlayton@xxxxxxxxxx>