> On Nov 16, 2022, at 10:44 PM, Dai Ngo <dai.ngo@xxxxxxxxxx> wrote: > > The delegation reaper is called by the nfsd memory shrinker's > 'count' callback. It scans the client list and sends the > courtesy CB_RECALL_ANY to the clients that hold delegations. > > To avoid flooding the clients with CB_RECALL_ANY requests, the > delegation reaper sends at most one CB_RECALL_ANY request to each > client every 5 seconds. > > Signed-off-by: Dai Ngo <dai.ngo@xxxxxxxxxx> > --- > fs/nfsd/nfs4state.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++--- > fs/nfsd/state.h | 8 +++++ > 2 files changed, 93 insertions(+), 4 deletions(-) > > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c > index 142481bc96de..13f326ae928c 100644 > --- a/fs/nfsd/nfs4state.c > +++ b/fs/nfsd/nfs4state.c > @@ -2131,6 +2131,7 @@ static void __free_client(struct kref *k) > kfree(clp->cl_nii_domain.data); > kfree(clp->cl_nii_name.data); > idr_destroy(&clp->cl_stateids); > + kfree(clp->cl_ra); > kmem_cache_free(client_slab, clp); > } > > @@ -2854,6 +2855,36 @@ static const struct tree_descr client_files[] = { > [3] = {""}, > }; > > +static int > +nfsd4_cb_recall_any_done(struct nfsd4_callback *cb, > + struct rpc_task *task) > +{ > + switch (task->tk_status) { > + case -NFS4ERR_DELAY: > + rpc_delay(task, 2 * HZ); > + return 0; > + default: > + return 1; > + } > +} > + > +static void > +nfsd4_cb_recall_any_release(struct nfsd4_callback *cb) > +{ > + struct nfs4_client *clp = cb->cb_clp; > + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); > + > + spin_lock(&nn->client_lock); > + clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); > + put_client_renew_locked(clp); > + spin_unlock(&nn->client_lock); > +} > + > +static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { > + .done = nfsd4_cb_recall_any_done, > + .release = nfsd4_cb_recall_any_release, > +}; > + > static struct nfs4_client *create_client(struct xdr_netobj name, > struct svc_rqst *rqstp, nfs4_verifier *verf) > { > @@ 
-2891,6 +2922,14 @@ static struct nfs4_client *create_client(struct xdr_netobj name, > free_client(clp); > return NULL; > } > + clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); > + if (!clp->cl_ra) { > + free_client(clp); > + return NULL; > + } > + clp->cl_ra_time = 0; > + nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, > + NFSPROC4_CLNT_CB_RECALL_ANY); > return clp; > } > > @@ -4349,14 +4388,16 @@ nfsd4_init_slabs(void) > static unsigned long > nfsd_lowmem_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) > { > - int cnt; > + int count; > struct nfsd_net *nn = container_of(shrink, > struct nfsd_net, nfsd_client_shrinker); > > - cnt = atomic_read(&nn->nfsd_courtesy_clients); > - if (cnt > 0) > + count = atomic_read(&nn->nfsd_courtesy_clients); > + if (!count) > + count = atomic_long_read(&num_delegations); > + if (count) > mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0); > - return (unsigned long)cnt; > + return (unsigned long)count; > } > > static unsigned long > @@ -6134,6 +6175,45 @@ courtesy_client_reaper(struct nfsd_net *nn) > } > > static void > +deleg_reaper(struct nfsd_net *nn) > +{ > + struct list_head *pos, *next; > + struct nfs4_client *clp; > + struct list_head cblist; > + > + INIT_LIST_HEAD(&cblist); > + spin_lock(&nn->client_lock); > + list_for_each_safe(pos, next, &nn->client_lru) { > + clp = list_entry(pos, struct nfs4_client, cl_lru); > + if (clp->cl_state != NFSD4_ACTIVE || > + list_empty(&clp->cl_delegations) || > + atomic_read(&clp->cl_delegs_in_recall) || > + test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || > + (ktime_get_boottime_seconds() - > + clp->cl_ra_time < 5)) { > + continue; > + } > + list_add(&clp->cl_ra_cblist, &cblist); > + > + /* release in nfsd4_cb_recall_any_release */ > + atomic_inc(&clp->cl_rpc_users); > + set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); > + clp->cl_ra_time = ktime_get_boottime_seconds(); > + } > + spin_unlock(&nn->client_lock); > + > + while 
(!list_empty(&cblist)) { > + clp = list_first_entry(&cblist, struct nfs4_client, > + cl_ra_cblist); > + list_del_init(&clp->cl_ra_cblist); > + clp->cl_ra->ra_keep = 0; > + clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) | > + BIT(RCA4_TYPE_MASK_WDATA_DLG); Linux NFSD doesn't hand out write delegations. I don't think we should set WDATA_DLG yet...? > + nfsd4_run_cb(&clp->cl_ra->ra_cb); > + } > +} > + > +static void > nfsd4_lowmem_shrinker(struct work_struct *work) > { > struct delayed_work *dwork = to_delayed_work(work); > @@ -6141,6 +6221,7 @@ nfsd4_lowmem_shrinker(struct work_struct *work) > nfsd_shrinker_work); > > courtesy_client_reaper(nn); > + deleg_reaper(nn); > } > > static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) > diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h > index 6b33cbbe76d3..12ce9792c5b6 100644 > --- a/fs/nfsd/state.h > +++ b/fs/nfsd/state.h > @@ -368,6 +368,7 @@ struct nfs4_client { > #define NFSD4_CLIENT_UPCALL_LOCK (5) /* upcall serialization */ > #define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \ > 1 << NFSD4_CLIENT_CB_KILL) > +#define NFSD4_CLIENT_CB_RECALL_ANY (6) > unsigned long cl_flags; > const struct cred *cl_cb_cred; > struct rpc_clnt *cl_cb_client; > @@ -411,6 +412,10 @@ struct nfs4_client { > > unsigned int cl_state; > atomic_t cl_delegs_in_recall; > + > + struct nfsd4_cb_recall_any *cl_ra; > + time64_t cl_ra_time; > + struct list_head cl_ra_cblist; > }; > > /* struct nfs4_client_reset > @@ -642,6 +647,9 @@ enum nfsd4_cb_op { > NFSPROC4_CLNT_CB_RECALL_ANY, > }; > > +#define RCA4_TYPE_MASK_RDATA_DLG 0 > +#define RCA4_TYPE_MASK_WDATA_DLG 1 > + > /* Returns true iff a is later than b: */ > static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b) > { > -- > 2.9.5 > -- Chuck Lever