From: Darrick J. Wong <darrick.wong@xxxxxxxxxx>

Add a copy_file_range handler to XFS so that we can accelerate file
copies with reflink when the source and destination ranges are not
block-aligned.  We'll use the generic pagecache copy to handle the
unaligned edges and attempt to reflink the middle.

Signed-off-by: Darrick J. Wong <darrick.wong@xxxxxxxxxx>
---
 fs/xfs/xfs_file.c |   99 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 99 insertions(+)

diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 5b0f93f73837..9d1bb0dc30e2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1119,6 +1119,104 @@ xfs_file_remap_range(
 	return remapped > 0 ? remapped : ret;
 }
 
+/*
+ * Decide if we want to use reflink to accelerate a copy_file_range request.
+ *
+ * We need to use the generic pagecache copy routine if there's no reflink; if
+ * the two files are on different filesystems; if the two files are on
+ * different devices; or if the two offsets are not at the same offset within
+ * an fs block.  Studies on the author's computer show that reflink doesn't
+ * speed up copies smaller than 32k, so use the page cache for those.
+ */
+static inline bool
+xfs_want_reflink_copy_range(
+	struct xfs_inode	*src,
+	unsigned int		src_off,
+	struct xfs_inode	*dst,
+	unsigned int		dst_off,
+	size_t			len)
+{
+	struct xfs_mount	*mp = src->i_mount;
+
+	if (len < 32768)
+		return false;
+	if (mp != dst->i_mount)
+		return false;
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return false;
+	if (XFS_IS_REALTIME_INODE(src) != XFS_IS_REALTIME_INODE(dst))
+		return false;
+	return (src_off & mp->m_blockmask) == (dst_off & mp->m_blockmask);
+}
+
+STATIC ssize_t
+xfs_file_copy_range(
+	struct file		*src_file,
+	loff_t			src_off,
+	struct file		*dst_file,
+	loff_t			dst_off,
+	size_t			len,
+	unsigned int		flags)
+{
+	struct inode		*inode_src = file_inode(src_file);
+	struct xfs_inode	*src = XFS_I(inode_src);
+	struct inode		*inode_dst = file_inode(dst_file);
+	struct xfs_inode	*dst = XFS_I(inode_dst);
+	struct xfs_mount	*mp = src->i_mount;
+	loff_t			copy_ret;
+	loff_t			next_block;
+	size_t			copy_len;
+	ssize_t			total_copied = 0;
+
+	/* Bypass all this if no copy acceleration is possible. */
+	if (!xfs_want_reflink_copy_range(src, src_off, dst, dst_off, len))
+		goto use_generic;
+
+	/* Use the regular copy until we're block aligned at the start. */
+	next_block = round_up(src_off + 1, mp->m_sb.sb_blocksize);
+	copy_len = min_t(size_t, len, next_block - src_off);
+	if (copy_len > 0) {
+		copy_ret = generic_copy_file_range(src_file, src_off, dst_file,
+				dst_off, copy_len, flags);
+		if (copy_ret < 0)
+			return copy_ret;
+
+		src_off += copy_ret;
+		dst_off += copy_ret;
+		len -= copy_ret;
+		total_copied += copy_ret;
+		if (copy_ret < copy_len || len == 0)
+			return total_copied;
+	}
+
+	/*
+	 * Now try to reflink as many full blocks as we can.  If the end of the
+	 * copy request wasn't block-aligned or the reflink fails, we'll just
+	 * fall into the generic copy to do the rest.
+	 */
+	copy_len = round_down(len, mp->m_sb.sb_blocksize);
+	if (copy_len > 0) {
+		copy_ret = xfs_file_remap_range(src_file, src_off, dst_file,
+				dst_off, copy_len, REMAP_FILE_CAN_SHORTEN);
+		if (copy_ret >= 0) {
+			src_off += copy_ret;
+			dst_off += copy_ret;
+			len -= copy_ret;
+			total_copied += copy_ret;
+			if (copy_ret < copy_len || len == 0)
+				return total_copied;
+		}
+	}
+
+use_generic:
+	/* Use the regular copy to deal with leftover bytes. */
+	copy_ret = generic_copy_file_range(src_file, src_off, dst_file,
+			dst_off, len, flags);
+	if (copy_ret < 0)
+		return copy_ret;
+	return total_copied + copy_ret;
+}
+
 STATIC int
 xfs_file_open(
 	struct inode	*inode,
@@ -1381,6 +1479,7 @@ const struct file_operations xfs_file_operations = {
 	.get_unmapped_area = thp_get_unmapped_area,
 	.fallocate	= xfs_file_fallocate,
 	.fadvise	= xfs_file_fadvise,
+	.copy_file_range = xfs_file_copy_range,
 	.remap_file_range = xfs_file_remap_range,
 };
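For context only (not part of the patch): a minimal userspace sketch of how
this path gets exercised.  The caller just issues copy_file_range(2) and the
new handler above decides whether the block-aligned middle of the request can
be reflinked.  The file names and the copy length below are made-up
assumptions for illustration; needs glibc 2.27+ for the copy_file_range()
wrapper.

	/* Illustrative only: copy 1 MiB from src.dat to dst.dat. */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int
	main(void)
	{
		int	fd_in = open("src.dat", O_RDONLY);
		int	fd_out = open("dst.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		size_t	len = 1024 * 1024;	/* assumed copy length */
		ssize_t	ret;

		if (fd_in < 0 || fd_out < 0) {
			perror("open");
			return EXIT_FAILURE;
		}

		/* Let the kernel advance both file offsets; loop on short copies. */
		while (len > 0) {
			ret = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);
			if (ret < 0) {
				perror("copy_file_range");
				return EXIT_FAILURE;
			}
			if (ret == 0)
				break;	/* hit EOF on the source */
			len -= ret;
		}

		close(fd_in);
		close(fd_out);
		return EXIT_SUCCESS;
	}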