On Fri, Nov 01, 2019 at 10:46:16AM +1100, Dave Chinner wrote: > From: Dave Chinner <dchinner@xxxxxxxxxx> > > If we are reclaiming all inodes, it is likely we need to flush the > entire AIL to do that. We have mechanisms to do that without needing > to push to a specific LSN. > > Convert xfs_reclaim_all_inodes() to use xfs_ail_push_all variant so > we can get rid of the hacky xfs_ail_push_sync() scaffolding we used > to support the intermediate stages of the non-blocking reclaim > changeset. > > Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx> > --- > fs/xfs/xfs_icache.c | 17 +++++++++++------ > fs/xfs/xfs_trans_ail.c | 32 -------------------------------- > fs/xfs/xfs_trans_priv.h | 2 -- > 3 files changed, 11 insertions(+), 40 deletions(-) > > diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c > index 71a729e29260..11bf4768d491 100644 > --- a/fs/xfs/xfs_icache.c > +++ b/fs/xfs/xfs_icache.c ... > @@ -1066,13 +1074,10 @@ xfs_reclaim_all_inodes( > xfs_inode_reclaim_isolate, &ra, to_free); > xfs_dispose_inodes(&ra.freeable); > > - if (freed == 0) { > + if (freed == 0) > xfs_log_force(mp, XFS_LOG_SYNC); > - xfs_ail_push_all(mp->m_ail); > - } else if (ra.lowest_lsn != NULLCOMMITLSN) { > - xfs_ail_push_sync(mp->m_ail, ra.lowest_lsn); > - } > - cond_resched(); > + else if (ra.dirty_skipped) > + congestion_wait(BLK_RW_ASYNC, HZ/10); Why not use xfs_ail_push_all_sync() in this function and skip the direct stall? This is only used in the unmount and quiesce paths so the big hammer approach seems reasonable. As it is, the former already calls xfs_ail_push_all_sync() before xfs_reclaim_all_inodes() and the latter calls xfs_log_force(mp, XFS_LOG_SYNC). 
Brian > } > } > > diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c > index 3e1d0e1439e2..685a21cd24c0 100644 > --- a/fs/xfs/xfs_trans_ail.c > +++ b/fs/xfs/xfs_trans_ail.c > @@ -662,36 +662,6 @@ xfs_ail_push_all( > xfs_ail_push(ailp, threshold_lsn); > } > > -/* > - * Push the AIL to a specific lsn and wait for it to complete. > - */ > -void > -xfs_ail_push_sync( > - struct xfs_ail *ailp, > - xfs_lsn_t threshold_lsn) > -{ > - struct xfs_log_item *lip; > - DEFINE_WAIT(wait); > - > - spin_lock(&ailp->ail_lock); > - while ((lip = xfs_ail_min(ailp)) != NULL) { > - prepare_to_wait(&ailp->ail_push, &wait, TASK_UNINTERRUPTIBLE); > - if (XFS_FORCED_SHUTDOWN(ailp->ail_mount) || > - XFS_LSN_CMP(threshold_lsn, lip->li_lsn) < 0) > - break; > - if (XFS_LSN_CMP(threshold_lsn, ailp->ail_target) > 0) > - ailp->ail_target = threshold_lsn; > - wake_up_process(ailp->ail_task); > - spin_unlock(&ailp->ail_lock); > - schedule(); > - spin_lock(&ailp->ail_lock); > - } > - spin_unlock(&ailp->ail_lock); > - > - finish_wait(&ailp->ail_push, &wait); > -} > - > - > /* > * Push out all items in the AIL immediately and wait until the AIL is empty. 
> */ > @@ -732,7 +702,6 @@ xfs_ail_update_finish( > if (!XFS_FORCED_SHUTDOWN(mp)) > xlog_assign_tail_lsn_locked(mp); > > - wake_up_all(&ailp->ail_push); > if (list_empty(&ailp->ail_head)) > wake_up_all(&ailp->ail_empty); > spin_unlock(&ailp->ail_lock); > @@ -889,7 +858,6 @@ xfs_trans_ail_init( > spin_lock_init(&ailp->ail_lock); > INIT_LIST_HEAD(&ailp->ail_buf_list); > init_waitqueue_head(&ailp->ail_empty); > - init_waitqueue_head(&ailp->ail_push); > > ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s", > ailp->ail_mount->m_fsname); > diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h > index 1b6f4bbd47c0..35655eac01a6 100644 > --- a/fs/xfs/xfs_trans_priv.h > +++ b/fs/xfs/xfs_trans_priv.h > @@ -61,7 +61,6 @@ struct xfs_ail { > int ail_log_flush; > struct list_head ail_buf_list; > wait_queue_head_t ail_empty; > - wait_queue_head_t ail_push; > }; > > /* > @@ -114,7 +113,6 @@ xfs_trans_ail_remove( > } > > void xfs_ail_push(struct xfs_ail *, xfs_lsn_t); > -void xfs_ail_push_sync(struct xfs_ail *, xfs_lsn_t); > void xfs_ail_push_all(struct xfs_ail *); > void xfs_ail_push_all_sync(struct xfs_ail *); > struct xfs_log_item *xfs_ail_min(struct xfs_ail *ailp); > -- > 2.24.0.rc0 >