Tree-manipulating operations like merging nodes often release tree nodes that were allocated earlier in the transaction. Btrfs cleans such nodes so that their pages are not needlessly written out. On HMZONED drives, however, this optimization stalls subsequent I/O: cancelling the write-out of the freed blocks leaves a gap in the sequential write stream the device expects.

This patch checks whether the next dirty extent buffer is contiguous with the previously written one. If it is not, the extent buffers between the previous one and the next one are redirtied, so that all dirty buffers are written out sequentially.

Signed-off-by: Naohiro Aota <naohiro.aota@xxxxxxx>
---
 common/hmzoned.c | 30 ++++++++++++++++++++++++++++++
 common/hmzoned.h |  7 +++++++
 ctree.h          |  1 +
 transaction.c    |  7 +++++++
 4 files changed, 45 insertions(+)

diff --git a/common/hmzoned.c b/common/hmzoned.c
index f268f360d8f7..53c9e1cfd472 100644
--- a/common/hmzoned.c
+++ b/common/hmzoned.c
@@ -907,10 +907,40 @@ out:
 	if (num_conventional && emulated_offset > cache->alloc_offset)
 		ret = -EIO;
 
+	if (!ret)
+		cache->write_offset = cache->alloc_offset;
+
 	free(alloc_offsets);
 	return ret;
 }
 
+bool btrfs_redirty_extent_buffer_for_hmzoned(struct btrfs_fs_info *fs_info,
+					     u64 start, u64 end)
+{
+	u64 next;
+	struct btrfs_block_group_cache *cache;
+	struct extent_buffer *eb;
+
+	if (!fs_info->fs_devices->hmzoned)
+		return false;
+
+	cache = btrfs_lookup_first_block_group(fs_info, start);
+	BUG_ON(!cache);
+
+	if (cache->key.objectid + cache->write_offset < start) {
+		next = cache->key.objectid + cache->write_offset;
+		BUG_ON(next + fs_info->nodesize > start);
+		eb = btrfs_find_create_tree_block(fs_info, next);
+		btrfs_mark_buffer_dirty(eb);
+		free_extent_buffer(eb);
+		return true;
+	}
+
+	cache->write_offset += (end + 1 - start);
+
+	return false;
+}
+
 #endif
 
 int btrfs_get_zone_info(int fd, const char *file, bool hmzoned,
diff --git a/common/hmzoned.h b/common/hmzoned.h
index a6b16d0ed35a..ee2fab311967 100644
--- a/common/hmzoned.h
+++ b/common/hmzoned.h
@@ -72,6 +72,8 @@ bool btrfs_check_allocatable_zones(struct btrfs_device *device, u64 pos,
 				   u64 num_bytes);
 int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 				     struct btrfs_block_group_cache *cache);
+bool btrfs_redirty_extent_buffer_for_hmzoned(struct btrfs_fs_info *fs_info,
+					     u64 start, u64 end);
 #else
 static inline bool zone_is_sequential(struct btrfs_zoned_device_info *zinfo,
 				      u64 bytenr)
@@ -112,6 +114,11 @@ static inline int btrfs_load_block_group_zone_info(
 {
 	return 0;
 }
+static inline bool btrfs_redirty_extent_buffer_for_hmzoned(
+	struct btrfs_fs_info *fs_info, u64 start, u64 end)
+{
+	return false;
+}
 
 #endif /* BTRFS_ZONED */
 
diff --git a/ctree.h b/ctree.h
index fe72bd8921b0..7418627cade3 100644
--- a/ctree.h
+++ b/ctree.h
@@ -1125,6 +1125,7 @@ struct btrfs_block_group_cache {
 	 * enabled.
 	 */
 	u64 alloc_offset;
+	u64 write_offset;
 };
 
 struct btrfs_device;
diff --git a/transaction.c b/transaction.c
index 45bb9e1f9de6..7b37f12f118f 100644
--- a/transaction.c
+++ b/transaction.c
@@ -18,6 +18,7 @@
 #include "disk-io.h"
 #include "transaction.h"
 #include "delayed-ref.h"
+#include "common/hmzoned.h"
 #include "common/messages.h"
 
 
@@ -136,10 +137,16 @@ int __commit_transaction(struct btrfs_trans_handle *trans,
 	int ret;
 
 	while(1) {
+again:
 		ret = find_first_extent_bit(tree, 0, &start, &end,
 					    EXTENT_DIRTY);
 		if (ret)
 			break;
+
+		if (btrfs_redirty_extent_buffer_for_hmzoned(fs_info, start,
+							    end))
+			goto again;
+
 		while(start <= end) {
 			eb = find_first_extent_buffer(tree, start);
 			BUG_ON(!eb || eb->start != start);
--
2.24.0
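
For readers following along, a minimal, self-contained sketch of the fill-in
behaviour described above (not part of the patch; the names zone_sim,
NODESIZE, write_block and write_dirty_block are invented for illustration):
before a dirty tree block is written, any blocks skipped since the zone's
write pointer are written first, so the device only ever sees a sequential
stream. The patch achieves the same effect one block at a time via the
goto-again loop in __commit_transaction.

/* Illustration only -- names and layout are assumptions, not btrfs-progs code. */
#include <stdio.h>
#include <stdint.h>

#define NODESIZE 16384ULL	/* assumed tree block size for the example */

struct zone_sim {
	uint64_t start;		/* first byte of the zone */
	uint64_t write_offset;	/* next byte the device will accept, relative to start */
};

/* "Write" one tree block and advance the simulated write pointer. */
static void write_block(struct zone_sim *z, uint64_t bytenr)
{
	printf("write block at %llu\n", (unsigned long long)bytenr);
	z->write_offset = bytenr + NODESIZE - z->start;
}

/*
 * If the next dirty block is not contiguous with the write pointer,
 * emit the skipped (freed) blocks first, then the dirty block itself.
 */
static void write_dirty_block(struct zone_sim *z, uint64_t bytenr)
{
	uint64_t next = z->start + z->write_offset;

	while (next < bytenr) {		/* fill the gap left by freed blocks */
		write_block(z, next);
		next += NODESIZE;
	}
	write_block(z, bytenr);
}

int main(void)
{
	struct zone_sim z = { .start = 0, .write_offset = 0 };
	/* Dirty blocks with a hole at 16384 (a node freed by a merge). */
	uint64_t dirty[] = { 0, 32768, 49152 };
	unsigned int i;

	for (i = 0; i < sizeof(dirty) / sizeof(dirty[0]); i++)
		write_dirty_block(&z, dirty[i]);
	return 0;
}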