Delegations can be expensive to return, and can cause scalability
issues for the server. Let's therefore try to limit the number of
inactive delegations we hold. Once the number of delegations is above
a certain threshold, start to return them on close.

Signed-off-by: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>
---
 fs/nfs/delegation.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index a777b3d0e720..4a841071d8a7 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -25,7 +25,10 @@
 #include "internal.h"
 #include "nfs4trace.h"
 
+#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
+
 static atomic_long_t nfs_active_delegations;
+static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
 
 static void __nfs_free_delegation(struct nfs_delegation *delegation)
 {
@@ -676,7 +679,8 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
 	delegation = nfs4_get_valid_delegation(inode);
 	if (!delegation)
 		goto out;
-	if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
+	if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
+	    atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
 		spin_lock(&delegation->lock);
 		if (delegation->inode &&
 		    list_empty(&NFS_I(inode)->open_files) &&
@@ -1365,3 +1369,5 @@ bool nfs4_delegation_flush_on_close(const struct inode *inode)
 	rcu_read_unlock();
 	return ret;
 }
+
+module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
-- 
2.24.1
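
For context, below is a minimal userspace sketch (not part of the patch)
showing how the new knob could be inspected at runtime. It assumes the
parameter ends up under /sys/module/nfsv4/parameters/ because
delegation.c is built into nfsv4.ko; verify the exact path on the target
kernel. Given the 0644 mode, root could also raise or lower the
threshold by writing a new value to the same file.

/*
 * Hypothetical helper, not part of the patch: print the current NFS
 * client delegation watermark exposed by module_param_named() above.
 * The sysfs path is an assumption (delegation.c is part of nfsv4.ko).
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/module/nfsv4/parameters/delegation_watermark";
	unsigned int watermark;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &watermark) == 1)
		printf("NFS client delegation watermark: %u\n", watermark);
	fclose(f);
	return 0;
}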