> On Apr 14, 2023, at 3:17 PM, Jeff Layton <jlayton@xxxxxxxxxx> wrote: > > On Fri, 2023-04-14 at 18:20 +0000, Chuck Lever III wrote: >>> On Jan 18, 2023, at 12:31 PM, Jeff Layton <jlayton@xxxxxxxxxx> >>> wrote: >>> >>> When queueing a dispose list to the appropriate "freeme" lists, it >>> pointlessly queues the objects one at a time to an intermediate >>> list. >>> >>> Remove a few helpers and just open code a list_move to make it more >>> clear and efficient. Better document the resulting functions with >>> kerneldoc comments. >>> >>> Signed-off-by: Jeff Layton <jlayton@xxxxxxxxxx> >>> --- >>> fs/nfsd/filecache.c | 63 +++++++++++++++---------------------------- >>> -- >>> 1 file changed, 21 insertions(+), 42 deletions(-) >>> >>> diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c >>> index 58ac93e7e680..a2bc4bd90b9a 100644 >>> --- a/fs/nfsd/filecache.c >>> +++ b/fs/nfsd/filecache.c >>> @@ -513,49 +513,25 @@ nfsd_file_dispose_list(struct list_head >>> *dispose) >>> } >>> } >>> >>> -static void >>> -nfsd_file_list_remove_disposal(struct list_head *dst, >>> - struct nfsd_fcache_disposal *l) >>> -{ >>> - spin_lock(&l->lock); >>> - list_splice_init(&l->freeme, dst); >>> - spin_unlock(&l->lock); >>> -} >>> - >>> -static void >>> -nfsd_file_list_add_disposal(struct list_head *files, struct net >>> *net) >>> -{ >>> - struct nfsd_net *nn = net_generic(net, nfsd_net_id); >>> - struct nfsd_fcache_disposal *l = nn->fcache_disposal; >>> - >>> - spin_lock(&l->lock); >>> - list_splice_tail_init(files, &l->freeme); >>> - spin_unlock(&l->lock); >>> - queue_work(nfsd_filecache_wq, &l->work); >>> -} >>> - >>> -static void >>> -nfsd_file_list_add_pernet(struct list_head *dst, struct list_head >>> *src, >>> - struct net *net) >>> -{ >>> - struct nfsd_file *nf, *tmp; >>> - >>> - list_for_each_entry_safe(nf, tmp, src, nf_lru) { >>> - if (nf->nf_net == net) >>> - list_move_tail(&nf->nf_lru, dst); >>> - } >>> -} >>> - >>> +/** >>> + * nfsd_file_dispose_list_delayed - move list of dead 
files to >>> net's freeme list >>> + * @dispose: list of nfsd_files to be disposed >>> + * >>> + * Transfers each file to the "freeme" list for its nfsd_net, to >>> eventually >>> + * be disposed of by the per-net garbage collector. >>> + */ >>> static void >>> nfsd_file_dispose_list_delayed(struct list_head *dispose) >>> { >>> - LIST_HEAD(list); >>> - struct nfsd_file *nf; >>> - >>> while(!list_empty(dispose)) { >>> - nf = list_first_entry(dispose, struct nfsd_file, nf_lru); >>> - nfsd_file_list_add_pernet(&list, dispose, nf->nf_net); >>> - nfsd_file_list_add_disposal(&list, nf->nf_net); >>> + struct nfsd_file *nf = list_first_entry(dispose, >>> + struct nfsd_file, nf_lru); >>> + struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id); >>> + struct nfsd_fcache_disposal *l = nn->fcache_disposal; >>> + >>> + spin_lock(&l->lock); >>> + list_move_tail(&nf->nf_lru, &l->freeme); >>> + spin_unlock(&l->lock); >>> } >>> } >>> >>> @@ -765,8 +741,8 @@ nfsd_file_close_inode_sync(struct inode *inode) >>> * nfsd_file_delayed_close - close unused nfsd_files >>> * @work: dummy >>> * >>> - * Walk the LRU list and destroy any entries that have not been >>> used since >>> - * the last scan. >>> + * Scrape the freeme list for this nfsd_net, and then dispose of >>> them >>> + * all. >>> */ >>> static void >>> nfsd_file_delayed_close(struct work_struct *work) >>> @@ -775,7 +751,10 @@ nfsd_file_delayed_close(struct work_struct >>> *work) >>> struct nfsd_fcache_disposal *l = container_of(work, >>> struct nfsd_fcache_disposal, work); >>> >>> - nfsd_file_list_remove_disposal(&head, l); >>> + spin_lock(&l->lock); >>> + list_splice_init(&l->freeme, &head); >>> + spin_unlock(&l->lock); >>> + >>> nfsd_file_dispose_list(&head); >>> } >> >> Hey Jeff - >> >> After applying this one, tmpfs exports appear to leak space, >> even after all files and directories are deleted. 
Eventually >> the filesystem is "full" -- modifying operations return ENOSPC >> but removing files doesn't recover the used space. >> >> Can you have a look at this? > Hrm, ok. Do you have a reproducer? Nothing special. Any workload that cleans up after itself should leave the "df -k" %Used column at zero percent when it finishes. What I'm seeing is that %Used never goes down. > Actually, I may see the bug. Does this fix it? > > diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c > index c173d460b17d..f40d8f3b35a4 100644 > --- a/fs/nfsd/filecache.c > +++ b/fs/nfsd/filecache.c > @@ -421,6 +421,7 @@ nfsd_file_dispose_list_delayed(struct list_head > *dispose) > spin_lock(&l->lock); > list_move_tail(&nf->nf_lru, &l->freeme); > spin_unlock(&l->lock); > + queue_work(nfsd_filecache_wq, &l->work); > } > } Yes, that addresses the symptom. I'll drop this version of the patch; can you send me a refreshed one? -- Chuck Lever