From: Josef Bacik <josef@xxxxxxxxxxxxxx>

Now that all the helpers that btrfs_check_nocow_lock uses handle nowait,
add a nowait flag to btrfs_check_nocow_lock so it can be used by the
write path.

Signed-off-by: Josef Bacik <josef@xxxxxxxxxxxxxx>
Signed-off-by: Stefan Roesch <shr@xxxxxx>
---
 fs/btrfs/ctree.h |  2 +-
 fs/btrfs/file.c  | 33 ++++++++++++++++++++++-----------
 fs/btrfs/inode.c |  2 +-
 3 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5cc5394503e0..eb25d007e651 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3480,7 +3480,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 		      struct extent_state **cached, bool noreserve);
 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
-			   size_t *write_bytes);
+			   size_t *write_bytes, bool nowait);
 void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
 
 /* tree-defrag.c */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0f257205c63d..f18efd9f2bc3 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1481,7 +1481,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
  */
 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
-			   size_t *write_bytes)
+			   size_t *write_bytes, bool nowait)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct btrfs_root *root = inode->root;
@@ -1500,16 +1500,21 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
 			   fs_info->sectorsize) - 1;
 	num_bytes = lockend - lockstart + 1;
 
-	btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL);
+	if (nowait) {
+		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend)) {
+			btrfs_drew_write_unlock(&root->snapshot_lock);
+			return -EAGAIN;
+		}
+	} else {
+		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL);
+	}
 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
-			NULL, NULL, NULL, false, false);
-	if (ret <= 0) {
-		ret = 0;
+			NULL, NULL, NULL, nowait, false);
+	if (ret <= 0)
 		btrfs_drew_write_unlock(&root->snapshot_lock);
-	} else {
+	else
 		*write_bytes = min_t(size_t, *write_bytes ,
 				     num_bytes - pos + lockstart);
-	}
 	unlock_extent(&inode->io_tree, lockstart, lockend);
 
 	return ret;
@@ -1666,16 +1671,22 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 						  &data_reserved, pos,
 						  write_bytes, false);
 		if (ret < 0) {
+			int can_nocow;
+
 			/*
 			 * If we don't have to COW at the offset, reserve
 			 * metadata only. write_bytes may get smaller than
 			 * requested here.
 			 */
-			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
-						   &write_bytes) > 0)
-				only_release_metadata = true;
-			else
+			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
+							   &write_bytes, false);
+			if (can_nocow < 0)
+				ret = can_nocow;
+			if (can_nocow > 0)
+				ret = 0;
+			if (ret)
 				break;
+			only_release_metadata = true;
 		}
 
 		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 52b3abb4c57c..4ff07b47df82 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4883,7 +4883,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
 					  blocksize, false);
 	if (ret < 0) {
-		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
+		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
 			/* For nocow case, no need to reserve data space */
 			only_release_metadata = true;
 		} else {
-- 
2.30.2
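For reference, a minimal sketch of the calling convention the new flag
enables, assuming a later patch in the series passes nowait = true from
the write path. The wrapper function below and its name are hypothetical
and exist only for illustration; btrfs_check_nocow_lock() and
btrfs_check_nocow_unlock() are the real helpers touched by this patch.

/*
 * Illustrative sketch only (not part of this patch): how a nowait
 * caller is expected to drive the new flag.
 */
static int example_try_nocow(struct btrfs_inode *inode, loff_t pos,
			     size_t *write_bytes, bool nowait)
{
	int ret;

	ret = btrfs_check_nocow_lock(inode, pos, write_bytes, nowait);
	if (ret < 0)
		return ret;	/* -EAGAIN: would block with nowait set */
	if (ret == 0)
		return 0;	/* must COW, caller reserves data space */

	/*
	 * NOCOW is possible for (a possibly shrunken) *write_bytes, so
	 * the caller can skip the data space reservation.  It must drop
	 * the lock once the write has been set up.
	 */
	btrfs_check_nocow_unlock(inode);
	return 1;
}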