This is a note to let you know that I've just added the patch titled

    btrfs: return bool from lock_extent_buffer_for_io

to the 6.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     btrfs-return-bool-from-lock_extent_buffer_for_io.patch
and it can be found in the queue-6.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 2399a7bf8b8bc98025c63b245c1592ef15eea824
Author: Christoph Hellwig <hch@xxxxxx>
Date:   Wed May 3 17:24:30 2023 +0200

    btrfs: return bool from lock_extent_buffer_for_io

    [ Upstream commit 9fdd160160f002ac2604c5b8f90598febad44ae7 ]

    lock_extent_buffer_for_io never returns a negative error value, so
    switch the return value to a simple bool.

    Reviewed-by: Johannes Thumshirn <johannes.thumshirn@xxxxxxx>
    Reviewed-by: Josef Bacik <josef@xxxxxxxxxxxxxx>
    Signed-off-by: Christoph Hellwig <hch@xxxxxx>
    Reviewed-by: David Sterba <dsterba@xxxxxxxx>
    [ keep noinline_for_stack ]
    Signed-off-by: David Sterba <dsterba@xxxxxxxx>
    Stable-dep-of: 7027f87108ce ("btrfs: don't treat zoned writeback as being from an async helper thread")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 51f0f28fb9b2c..28d0792359968 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1671,18 +1671,17 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
  *
  * May try to flush write bio if we can't get the lock.
  *
- * Return 0 if the extent buffer doesn't need to be submitted.
- * (E.g. the extent buffer is not dirty)
- * Return >0 is the extent buffer is submitted to bio.
- * Return <0 if something went wrong, no page is locked.
+ * Return %false if the extent buffer doesn't need to be submitted (e.g. the
+ * extent buffer is not dirty)
+ * Return %true is the extent buffer is submitted to bio.
  */
-static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
+static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
 			  struct btrfs_bio_ctrl *bio_ctrl)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
 	int i, num_pages;
 	int flush = 0;
-	int ret = 0;
+	bool ret = false;
 
 	if (!btrfs_try_tree_write_lock(eb)) {
 		submit_write_bio(bio_ctrl, 0);
@@ -1693,7 +1692,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
 		btrfs_tree_unlock(eb);
 		if (bio_ctrl->wbc->sync_mode != WB_SYNC_ALL)
-			return 0;
+			return false;
 		if (!flush) {
 			submit_write_bio(bio_ctrl, 0);
 			flush = 1;
@@ -1720,7 +1719,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 					 -eb->len,
 					 fs_info->dirty_metadata_batch);
-		ret = 1;
+		ret = true;
 	} else {
 		spin_unlock(&eb->refs_lock);
 	}
@@ -2053,7 +2052,6 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
 	u64 page_start = page_offset(page);
 	int bit_start = 0;
 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
-	int ret;
 
 	/* Lock and write each dirty extent buffers in the range */
 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
@@ -2099,25 +2097,13 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
 		if (!eb)
 			continue;
 
-		ret = lock_extent_buffer_for_io(eb, bio_ctrl);
-		if (ret == 0) {
-			free_extent_buffer(eb);
-			continue;
+		if (lock_extent_buffer_for_io(eb, bio_ctrl)) {
+			write_one_subpage_eb(eb, bio_ctrl);
+			submitted++;
 		}
-		if (ret < 0) {
-			free_extent_buffer(eb);
-			goto cleanup;
-		}
-		write_one_subpage_eb(eb, bio_ctrl);
 		free_extent_buffer(eb);
-		submitted++;
 	}
 	return submitted;
-
-cleanup:
-	/* We hit error, end bio for the submitted extent buffers */
-	submit_write_bio(bio_ctrl, ret);
-	return ret;
 }
 
 /*
@@ -2196,8 +2182,7 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl,
 
 	*eb_context = eb;
 
-	ret = lock_extent_buffer_for_io(eb, bio_ctrl);
-	if (ret <= 0) {
+	if (!lock_extent_buffer_for_io(eb, bio_ctrl)) {
 		btrfs_revert_meta_write_pointer(cache, eb);
 		if (cache)
 			btrfs_put_block_group(cache);
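For readers skimming the diff, here is a minimal sketch of the call-site
simplification this change enables; it is condensed from the submit_eb_subpage()
hunk above and is not the literal kernel code:

	/* Before: callers had to handle three cases, including a negative
	 * error value that lock_extent_buffer_for_io() never actually
	 * returned. */
	ret = lock_extent_buffer_for_io(eb, bio_ctrl);
	if (ret == 0) {
		free_extent_buffer(eb);
		continue;
	}
	if (ret < 0)		/* dead branch in practice */
		goto cleanup;

	/* After: a plain bool, so the unused error path disappears and the
	 * reference is dropped on a single common path. */
	if (lock_extent_buffer_for_io(eb, bio_ctrl)) {
		write_one_subpage_eb(eb, bio_ctrl);
		submitted++;
	}
	free_extent_buffer(eb);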