This is a note to let you know that I've just added the patch titled

    btrfs: zoned: introduce block group context to btrfs_eb_write_context

to the 6.5-stable tree which can be found at:

	http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     btrfs-zoned-introduce-block-group-context-to-btrfs_e.patch
and it can be found in the queue-6.5 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 3f40bc328cc1bf4e100d886abf45c56868f6f7da
Author: Naohiro Aota <naohiro.aota@xxxxxxx>
Date:   Tue Aug 8 01:12:32 2023 +0900

    btrfs: zoned: introduce block group context to btrfs_eb_write_context

    [ Upstream commit 7db94301a980c9da4168ac7ce61e7bde297306ba ]

    For metadata write out on the zoned mode, we call
    btrfs_check_meta_write_pointer() to check if an extent buffer to be
    written is aligned to the write pointer.

    We look up a block group containing the extent buffer for every extent
    buffer, which takes unnecessary effort as the writing extent buffers
    are mostly contiguous.

    Introduce "zoned_bg" to cache the block group working on. Also, while
    at it, rename "cache" to "block_group".

    Reviewed-by: Christoph Hellwig <hch@xxxxxx>
    Reviewed-by: Johannes Thumshirn <johannes.thumshirn@xxxxxxx>
    Signed-off-by: Naohiro Aota <naohiro.aota@xxxxxxx>
    Reviewed-by: David Sterba <dsterba@xxxxxxxx>
    Signed-off-by: David Sterba <dsterba@xxxxxxxx>
    Stable-dep-of: 13bb483d32ab ("btrfs: zoned: activate metadata block group on write time")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b3bf2e2704888..c2be1561a52cb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1881,7 +1881,6 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 {
 	struct writeback_control *wbc = ctx->wbc;
 	struct address_space *mapping = page->mapping;
-	struct btrfs_block_group *cache = NULL;
 	struct extent_buffer *eb;
 	int ret;
 
@@ -1919,7 +1918,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 
 	ctx->eb = eb;
 
-	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+	if (!btrfs_check_meta_write_pointer(eb->fs_info, ctx)) {
 		/*
 		 * If for_sync, this hole will be filled with
 		 * trasnsaction commit.
@@ -1933,18 +1932,15 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 	}
 
 	if (!lock_extent_buffer_for_io(eb, wbc)) {
-		btrfs_revert_meta_write_pointer(cache, eb);
-		if (cache)
-			btrfs_put_block_group(cache);
+		btrfs_revert_meta_write_pointer(ctx->zoned_bg, eb);
 		free_extent_buffer(eb);
 		return 0;
 	}
-	if (cache) {
+	if (ctx->zoned_bg) {
 		/*
 		 * Implies write in zoned mode. Mark the last eb in a block group.
 		 */
-		btrfs_schedule_zone_finish_bg(cache, eb);
-		btrfs_put_block_group(cache);
+		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
 	}
 	write_one_eb(eb, wbc);
 	free_extent_buffer(eb);
@@ -2057,6 +2053,9 @@ int btree_write_cache_pages(struct address_space *mapping,
 		ret = 0;
 	if (!ret && BTRFS_FS_ERROR(fs_info))
 		ret = -EROFS;
+
+	if (ctx.zoned_bg)
+		btrfs_put_block_group(ctx.zoned_bg);
 	btrfs_zoned_meta_io_unlock(fs_info);
 	return ret;
 }
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index ecc1660007c11..f61b7896320a1 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -97,6 +97,8 @@ struct extent_buffer {
 struct btrfs_eb_write_context {
 	struct writeback_control *wbc;
 	struct extent_buffer *eb;
+	/* Block group @eb resides in. Only used for zoned mode. */
+	struct btrfs_block_group *zoned_bg;
 };
 
 /*
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index d9e6df2da272c..92f11176216b5 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1759,30 +1759,35 @@ void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
 }
 
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-				    struct extent_buffer *eb,
-				    struct btrfs_block_group **cache_ret)
+				    struct btrfs_eb_write_context *ctx)
 {
-	struct btrfs_block_group *cache;
-	bool ret = true;
+	const struct extent_buffer *eb = ctx->eb;
+	struct btrfs_block_group *block_group = ctx->zoned_bg;
 
 	if (!btrfs_is_zoned(fs_info))
 		return true;
 
-	cache = btrfs_lookup_block_group(fs_info, eb->start);
-	if (!cache)
-		return true;
+	if (block_group) {
+		if (block_group->start > eb->start ||
+		    block_group->start + block_group->length <= eb->start) {
+			btrfs_put_block_group(block_group);
+			block_group = NULL;
+			ctx->zoned_bg = NULL;
+		}
+	}
 
-	if (cache->meta_write_pointer != eb->start) {
-		btrfs_put_block_group(cache);
-		cache = NULL;
-		ret = false;
-	} else {
-		cache->meta_write_pointer = eb->start + eb->len;
+	if (!block_group) {
+		block_group = btrfs_lookup_block_group(fs_info, eb->start);
+		if (!block_group)
+			return true;
+		ctx->zoned_bg = block_group;
 	}
 
-	*cache_ret = cache;
+	if (block_group->meta_write_pointer != eb->start)
+		return false;
+	block_group->meta_write_pointer = eb->start + eb->len;
 
-	return ret;
+	return true;
 }
 
 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 27322b926038c..49d5bd87245c5 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -59,8 +59,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
 bool btrfs_use_zone_append(struct btrfs_bio *bbio);
 void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-				    struct extent_buffer *eb,
-				    struct btrfs_block_group **cache_ret);
+				    struct btrfs_eb_write_context *ctx);
 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 				     struct extent_buffer *eb);
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
@@ -190,8 +189,7 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
 }
 
 static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-						  struct extent_buffer *eb,
-						  struct btrfs_block_group **cache_ret)
+						  struct btrfs_eb_write_context *ctx)
 {
 	return true;
 }
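
The core of the change is the reshaped btrfs_check_meta_write_pointer() above: it
reuses ctx->zoned_bg while consecutive extent buffers stay inside the same block
group, only falls back to btrfs_lookup_block_group() when the cached group no
longer covers eb->start, and the reference is dropped once at the end of
btree_write_cache_pages(). Below is a minimal userspace sketch of that caching
pattern; the names here (struct range, struct write_ctx, range_lookup) are
simplified stand-ins invented for illustration, not the kernel structures.

/*
 * Illustrative, userspace-only sketch of the block-group caching pattern.
 * Not kernel code; types and helpers are made-up stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long length;
	int refs;			/* stands in for the block group refcount */
};

struct write_ctx {
	struct range *cached;		/* stands in for ctx->zoned_bg */
};

static struct range *range_lookup(struct range *tbl, size_t n,
				  unsigned long long addr)
{
	for (size_t i = 0; i < n; i++) {
		if (addr >= tbl[i].start &&
		    addr < tbl[i].start + tbl[i].length) {
			tbl[i].refs++;	/* lookup returns a referenced range */
			return &tbl[i];
		}
	}
	return NULL;
}

/*
 * Reuse ctx->cached while @addr stays inside it; otherwise drop the stale
 * reference and look the range up again -- the same shape as the zoned_bg
 * handling in btrfs_check_meta_write_pointer() above.
 */
static struct range *ctx_get_range(struct write_ctx *ctx, struct range *tbl,
				   size_t n, unsigned long long addr)
{
	if (ctx->cached && (ctx->cached->start > addr ||
			    ctx->cached->start + ctx->cached->length <= addr)) {
		ctx->cached->refs--;
		ctx->cached = NULL;
	}
	if (!ctx->cached)
		ctx->cached = range_lookup(tbl, n, addr);
	return ctx->cached;
}

int main(void)
{
	struct range tbl[] = { { 0, 1024, 0 }, { 1024, 1024, 0 } };
	struct write_ctx ctx = { NULL };
	unsigned long long addrs[] = { 16, 64, 512, 1040, 2000 };

	/* Mostly contiguous addresses: only two lookups happen, not five. */
	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		struct range *r = ctx_get_range(&ctx, tbl, 2, addrs[i]);

		printf("addr %llu -> range starting at %llu\n",
		       addrs[i], r ? r->start : 0);
	}
	if (ctx.cached)		/* drop the last reference once, at the end */
		ctx.cached->refs--;
	return 0;
}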