On Fri, Nov 24, 2023 at 09:13:22PM -0800, Christoph Hellwig wrote:
> This generally looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@xxxxxx>
> 
> But as mentioned earlier I think it would be nice to share some
> code between the normal xfs_defer_add and xfs_defer_add_barrier.
> 
> This would be the fold for this patch on top of the one previously
> posted and a light merge fix for the pausing between:

Applied.  Thanks for the quick cleanup!

--D

> diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
> index e70dc347074dc5..5d621e445e8ab9 100644
> --- a/fs/xfs/libxfs/xfs_defer.c
> +++ b/fs/xfs/libxfs/xfs_defer.c
> @@ -753,6 +753,22 @@ xfs_defer_can_append(
>  	return true;
>  }
>  
> +/* Create a new pending item at the end of the intake list. */
> +static struct xfs_defer_pending *
> +xfs_defer_alloc(
> +	struct xfs_trans		*tp,
> +	enum xfs_defer_ops_type		type)
> +{
> +	struct xfs_defer_pending	*dfp;
> +
> +	dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
> +			GFP_NOFS | __GFP_NOFAIL);
> +	dfp->dfp_type = type;
> +	INIT_LIST_HEAD(&dfp->dfp_work);
> +	list_add_tail(&dfp->dfp_list, &tp->t_dfops);
> +	return dfp;
> +};
> +
>  /* Add an item for later deferred processing. */
>  struct xfs_defer_pending *
>  xfs_defer_add(
> @@ -760,29 +776,18 @@ xfs_defer_add(
>  	enum xfs_defer_ops_type		type,
>  	struct list_head		*li)
>  {
> -	struct xfs_defer_pending	*dfp = NULL;
>  	const struct xfs_defer_op_type	*ops = defer_op_types[type];
> +	struct xfs_defer_pending	*dfp;
>  
>  	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
>  	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
>  
>  	dfp = xfs_defer_find(tp, type);
> -	if (!dfp || !xfs_defer_can_append(dfp, ops)) {
> -		/* Create a new pending item at the end of the intake list. */
> -		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
> -				GFP_NOFS | __GFP_NOFAIL);
> -		dfp->dfp_type = type;
> -		dfp->dfp_intent = NULL;
> -		dfp->dfp_done = NULL;
> -		dfp->dfp_count = 0;
> -		INIT_LIST_HEAD(&dfp->dfp_work);
> -		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
> -	}
> -
> +	if (!dfp || !xfs_defer_can_append(dfp, ops))
> +		dfp = xfs_defer_alloc(tp, type);
>  	list_add_tail(li, &dfp->dfp_work);
>  	trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
>  	dfp->dfp_count++;
> -
>  	return dfp;
>  }
>  
> @@ -1106,19 +1111,10 @@ xfs_defer_add_barrier(
>  	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
>  
>  	/* If the last defer op added was a barrier, we're done. */
> -	if (!list_empty(&tp->t_dfops)) {
> -		dfp = list_last_entry(&tp->t_dfops,
> -				struct xfs_defer_pending, dfp_list);
> -		if (dfp->dfp_type == XFS_DEFER_OPS_TYPE_BARRIER)
> -			return;
> -	}
> -
> -	dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
> -			GFP_NOFS | __GFP_NOFAIL);
> -	dfp->dfp_type = XFS_DEFER_OPS_TYPE_BARRIER;
> -	INIT_LIST_HEAD(&dfp->dfp_work);
> -	list_add_tail(&dfp->dfp_list, &tp->t_dfops);
> -
> +	dfp = xfs_defer_find(tp, XFS_DEFER_OPS_TYPE_BARRIER);
> +	if (dfp)
> +		return;
> +	dfp = xfs_defer_alloc(tp, XFS_DEFER_OPS_TYPE_BARRIER);
>  	trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL);
>  	dfp->dfp_count++;
>  }
> 