On Tue, Aug 27, 2024 at 07:09:54AM +0200, Christoph Hellwig wrote:
> xfs_buffered_write_iomap_begin can also create delallocate reservations
> that need cleaning up, prepare for that by adding support for the COW
> fork in xfs_bmap_punch_delalloc_range.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>

Seems fine to me.
Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>

--D

> ---
>  fs/xfs/xfs_aops.c      |  4 ++--
>  fs/xfs/xfs_bmap_util.c | 10 +++++++---
>  fs/xfs/xfs_bmap_util.h |  2 +-
>  fs/xfs/xfs_iomap.c     |  3 ++-
>  4 files changed, 12 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index 6dead20338e24c..559a3a57709748 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -116,7 +116,7 @@ xfs_end_ioend(
>  	if (unlikely(error)) {
>  		if (ioend->io_flags & IOMAP_F_SHARED) {
>  			xfs_reflink_cancel_cow_range(ip, offset, size, true);
> -			xfs_bmap_punch_delalloc_range(ip, offset,
> +			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, offset,
>  					offset + size);
>  		}
>  		goto done;
> @@ -456,7 +456,7 @@ xfs_discard_folio(
>  	 * byte of the next folio. Hence the end offset is only dependent on the
>  	 * folio itself and not the start offset that is passed in.
>  	 */
> -	xfs_bmap_punch_delalloc_range(ip, pos,
> +	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
>  			folio_pos(folio) + folio_size(folio));
>  }
>  
> diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
> index fe2e2c93097550..15c8f90f19a934 100644
> --- a/fs/xfs/xfs_bmap_util.c
> +++ b/fs/xfs/xfs_bmap_util.c
> @@ -443,11 +443,12 @@ xfs_getbmap(
>  void
>  xfs_bmap_punch_delalloc_range(
>  	struct xfs_inode	*ip,
> +	int			whichfork,
>  	xfs_off_t		start_byte,
>  	xfs_off_t		end_byte)
>  {
>  	struct xfs_mount	*mp = ip->i_mount;
> -	struct xfs_ifork	*ifp = &ip->i_df;
> +	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
>  	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
>  	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
>  	struct xfs_bmbt_irec	got, del;
> @@ -475,11 +476,14 @@ xfs_bmap_punch_delalloc_range(
>  			continue;
>  		}
>  
> -		xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur, &got, &del);
> +		xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
>  		if (!xfs_iext_get_extent(ifp, &icur, &got))
>  			break;
>  	}
>  
> +	if (whichfork == XFS_COW_FORK && !ifp->if_bytes)
> +		xfs_inode_clear_cowblocks_tag(ip);
> +
>  out_unlock:
>  	xfs_iunlock(ip, XFS_ILOCK_EXCL);
>  }
> @@ -590,7 +594,7 @@ xfs_free_eofblocks(
>  	 */
>  	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
>  		if (ip->i_delayed_blks) {
> -			xfs_bmap_punch_delalloc_range(ip,
> +			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
>  					round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
>  					LLONG_MAX);
>  		}
> diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
> index eb0895bfb9dae4..b29760d36e1ab1 100644
> --- a/fs/xfs/xfs_bmap_util.h
> +++ b/fs/xfs/xfs_bmap_util.h
> @@ -30,7 +30,7 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
>  }
>  #endif /* CONFIG_XFS_RT */
>  
> -void xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
> +void xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, int whichfork,
>  		xfs_off_t start_byte, xfs_off_t end_byte);
>  
>  struct kgetbmap {
> diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
> index 1e11f48814c0d0..24d69c8c168aeb 100644
> --- a/fs/xfs/xfs_iomap.c
> +++ b/fs/xfs/xfs_iomap.c
> @@ -1215,7 +1215,8 @@ xfs_buffered_write_delalloc_punch(
>  	loff_t			length,
>  	struct iomap		*iomap)
>  {
> -	xfs_bmap_punch_delalloc_range(XFS_I(inode), offset, offset + length);
> +	xfs_bmap_punch_delalloc_range(XFS_I(inode), XFS_DATA_FORK, offset,
> +			offset + length);
>  }
>  
>  static int
> -- 
> 2.43.0
> 
> 
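For illustration only, a minimal sketch of how a COW-fork caller could use the widened signature that this patch prepares for. The helper name below is hypothetical and not part of the series; only xfs_bmap_punch_delalloc_range(), its new whichfork argument, and XFS_COW_FORK come from the change itself, and the usual xfs headers are assumed.

/*
 * Hypothetical example caller (not part of this series): punch out any
 * delalloc reservation left in the COW fork over a byte range, the same
 * way the existing callers above clean up the data fork.
 */
static void
xfs_punch_stale_cow_delalloc(		/* hypothetical helper name */
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		length)
{
	/* whichfork selects the fork; the range is [offset, offset + length) in bytes */
	xfs_bmap_punch_delalloc_range(ip, XFS_COW_FORK, offset,
			offset + length);
}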