On Thu, Sep 17, 2020 at 11:28:29AM -0400, Brian Foster wrote: > On Wed, Sep 16, 2020 at 08:30:09PM -0700, Darrick J. Wong wrote: > > From: Darrick J. Wong <darrick.wong@xxxxxxxxxx> > > > > Now that we've landed a means for the defer ops manager to ask log items > > to relog themselves to move the log tail forward, we can improve how we > > decide when to relog so that we're not just using an arbitrary hardcoded > > value. > > > > The XFS log has a "push threshold", which tells us how far we'd have to > > move the log tail forward to keep 25% of the ondisk log space available. > > We use this threshold to decide when to force defer ops chains to relog > > themselves. This avoids unnecessary relogging (which adds extra steps > > to metadata updates) while helping us to avoid pinning the tail. > > > > A better algorithm would be to relog only when we detect that the time > > required to move the tail forward is greater than the time remaining > > before all the log space gets used up, but letting the upper levels > > drive the relogging means that it is difficult to coordinate relogging > > the lowest LSN'd intents first. > > > > Signed-off-by: Darrick J. Wong <darrick.wong@xxxxxxxxxx> > > --- > > FYI, the commit log doesn't match the git branch referenced in the cover > letter. 
> > fs/xfs/libxfs/xfs_defer.c | 32 +++++++++++++++++++++++++------- > > fs/xfs/xfs_log.c | 41 +++++++++++++++++++++++++++++++---------- > > fs/xfs/xfs_log.h | 2 ++ > > 3 files changed, 58 insertions(+), 17 deletions(-) > > > > > > diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c > > index 7938e4d3af90..97ec36f32a0a 100644 > > --- a/fs/xfs/libxfs/xfs_defer.c > > +++ b/fs/xfs/libxfs/xfs_defer.c > > @@ -17,6 +17,7 @@ > > #include "xfs_inode_item.h" > > #include "xfs_trace.h" > > #include "xfs_icache.h" > > +#include "xfs_log.h" > > > > /* > > * Deferred Operations in XFS > > @@ -372,15 +373,35 @@ xfs_defer_relog( > > struct list_head *dfops) > > { > > struct xfs_defer_pending *dfp; > > + xfs_lsn_t threshold_lsn; > > > > ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES); > > > > + /* > > + * Figure out where we need the tail to be in order to maintain the > > + * minimum required free space in the log. > > + */ > > + threshold_lsn = xlog_grant_push_threshold((*tpp)->t_mountp->m_log, 0); > > + if (threshold_lsn == NULLCOMMITLSN) > > + return 0; > > + > > list_for_each_entry(dfp, dfops, dfp_list) { > > + /* > > + * If the log intent item for this deferred op is behind the > > + * threshold, we're running out of space and need to relog it > > + * to release the tail. > > + */ > > + if (dfp->dfp_intent == NULL || > > Any reason the NULL check isn't in the previous patch? No. Now that I've squashed all these patches together, all the wonky churn should be eliminated. > > + XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) < 0) > > + continue; > > + > > Logic looks backwards, we should relog (not skip) if li_lsn is within > the threshold, right? Oops, fixed now. 
> > trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp); > > dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp); > > } > > > > - return xfs_defer_trans_roll(tpp); > > + if ((*tpp)->t_flags & XFS_TRANS_DIRTY) > > + return xfs_defer_trans_roll(tpp); > > I suspect this churn is eliminated if this code uses the threshold logic > from the start.. > > > + return 0; > > } > > > > /* > > @@ -444,7 +465,6 @@ xfs_defer_finish_noroll( > > struct xfs_trans **tp) > > { > > struct xfs_defer_pending *dfp; > > - unsigned int nr_rolls = 0; > > int error = 0; > > LIST_HEAD(dop_pending); > > > > @@ -471,11 +491,9 @@ xfs_defer_finish_noroll( > > goto out_shutdown; > > > > /* Every few rolls we relog all the intent items. */ > > - if (!(++nr_rolls % 7)) { > > - error = xfs_defer_relog(tp, &dop_pending); > > - if (error) > > - goto out_shutdown; > > - } > > + error = xfs_defer_relog(tp, &dop_pending); > > + if (error) > > + goto out_shutdown; > > > > dfp = list_first_entry(&dop_pending, struct xfs_defer_pending, > > dfp_list); > > diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c > > index ad0c69ee8947..62c9e0aaa7df 100644 > > --- a/fs/xfs/xfs_log.c > > +++ b/fs/xfs/xfs_log.c > > @@ -1475,14 +1475,15 @@ xlog_commit_record( > > } > > > > /* > > - * Push on the buffer cache code if we ever use more than 75% of the on-disk > > - * log space. This code pushes on the lsn which would supposedly free up > > - * the 25% which we want to leave free. We may need to adopt a policy which > > - * pushes on an lsn which is further along in the log once we reach the high > > - * water mark. In this manner, we would be creating a low water mark. > > + * Compute the LSN push target needed to push on the buffer cache code if we > > + * ever use more than 75% of the on-disk log space. This code pushes on the > > + * lsn which would supposedly free up the 25% which we want to leave free. 
We > > + * may need to adopt a policy which pushes on an lsn which is further along in > > + * the log once we reach the high water mark. In this manner, we would be > > + * creating a low water mark. > > */ > > -STATIC void > > -xlog_grant_push_ail( > > +xfs_lsn_t > > +xlog_grant_push_threshold( > > struct xlog *log, > > int need_bytes) > > { > > @@ -1508,7 +1509,7 @@ xlog_grant_push_ail( > > free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); > > free_threshold = max(free_threshold, 256); > > if (free_blocks >= free_threshold) > > - return; > > + return NULLCOMMITLSN; > > > > xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, > > &threshold_block); > > @@ -1528,13 +1529,33 @@ xlog_grant_push_ail( > > if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) > > threshold_lsn = last_sync_lsn; > > > > + return threshold_lsn; > > +} > > + > > +/* > > + * Push on the buffer cache code if we ever use more than 75% of the on-disk > > + * log space. This code pushes on the lsn which would supposedly free up > > + * the 25% which we want to leave free. We may need to adopt a policy which > > + * pushes on an lsn which is further along in the log once we reach the high > > + * water mark. In this manner, we would be creating a low water mark. > > + */ > > +STATIC void > > +xlog_grant_push_ail( > > + struct xlog *log, > > + int need_bytes) > > +{ > > + xfs_lsn_t threshold_lsn; > > + > > + threshold_lsn = xlog_grant_push_threshold(log, need_bytes); > > + if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log)) > > + return; > > + > > /* > > * Get the transaction layer to kick the dirty buffers out to > > * disk asynchronously. No point in trying to do this if > > * the filesystem is shutting down. > > */ > > - if (!XLOG_FORCED_SHUTDOWN(log)) > > - xfs_ail_push(log->l_ailp, threshold_lsn); > > + xfs_ail_push(log->l_ailp, threshold_lsn); > > } > > Separate refactoring patch for the xfs_log.c bits, please. Ok, fixed. 
--D > Brian > > > > > /* > > diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h > > index 1412d6993f1e..58c3fcbec94a 100644 > > --- a/fs/xfs/xfs_log.h > > +++ b/fs/xfs/xfs_log.h > > @@ -141,4 +141,6 @@ void xfs_log_quiesce(struct xfs_mount *mp); > > bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t); > > bool xfs_log_in_recovery(struct xfs_mount *); > > > > +xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes); > > + > > #endif /* __XFS_LOG_H__ */ > > >