Re: [PATCH 23/34] btrfs: allow btrfs_submit_bio to split bios

On Sat, Jan 21, 2023 at 07:50:20AM +0100, Christoph Hellwig wrote:
> Currently the I/O submitters have to split bios according to the
> chunk stripe boundaries.  This leads to extra lookups in the extent
> trees and a lot of boilerplate code.
> 
> To drop this requirement, split the bio when __btrfs_map_block
> returns a mapping that is smaller than the requested size and
> keep a count of pending bios in the original btrfs_bio so that
> the upper level completion is only invoked when all clones have
> completed.
> 
> Based on a patch from Qu Wenruo.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> Reviewed-by: Josef Bacik <josef@xxxxxxxxxxxxxx>
> Reviewed-by: Johannes Thumshirn <johannes.thumshirn@xxxxxxx>
> Reviewed-by: Qu Wenruo <wqu@xxxxxxxx>
> ---
>  fs/btrfs/bio.c | 108 ++++++++++++++++++++++++++++++++++++++++---------
>  fs/btrfs/bio.h |   1 +
>  2 files changed, 91 insertions(+), 18 deletions(-)
> 
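Worth spelling out the completion scheme this relies on: the original
bbio starts with pending_ios at 1, btrfs_split_bio() bumps the count for
every clone, clones carry no end_io of their own (their status is folded
back into the original via ->private), and only the final decrement
invokes the upper-level end_io.  A stand-alone sketch of that pattern,
with hypothetical stand-in types in plain C11 rather than the kernel
structures, and with the write-side accounting of
btrfs_bbio_propagate_error() simplified to first-error-wins:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bbio {
	atomic_int pending_ios;			/* 1 for the original itself */
	int status;				/* 0 == OK */
	void (*end_io)(struct fake_bbio *);	/* NULL on clones */
	struct fake_bbio *orig;			/* clones point at the original */
};

static void upper_end_io(struct fake_bbio *bbio)
{
	printf("upper-level completion, status %d\n", bbio->status);
}

/* Counterpart of btrfs_split_bio(): each clone takes a reference. */
static struct fake_bbio *split_bbio(struct fake_bbio *orig)
{
	struct fake_bbio *clone = calloc(1, sizeof(*clone));

	clone->orig = orig;
	atomic_fetch_add(&orig->pending_ios, 1);
	return clone;
}

/* Counterpart of btrfs_orig_bbio_end_io(): fold clones into the
 * original, and let only the last completion run the real end_io. */
static void orig_bbio_end_io(struct fake_bbio *bbio)
{
	if (bbio->orig) {
		struct fake_bbio *orig = bbio->orig;

		if (bbio->status && !orig->status)
			orig->status = bbio->status;	/* simplified */
		free(bbio);
		bbio = orig;
	}
	if (atomic_fetch_sub(&bbio->pending_ios, 1) == 1)
		bbio->end_io(bbio);
}

int main(void)
{
	struct fake_bbio orig = { .pending_ios = 1, .end_io = upper_end_io };
	struct fake_bbio *clone = split_bbio(&orig);

	clone->status = -5;		/* pretend the clone's half failed */
	orig_bbio_end_io(clone);	/* 2 -> 1, nothing visible yet */
	orig_bbio_end_io(&orig);	/* 1 -> 0, upper end_io runs once */
	return 0;
}

The property that matters is that the upper-level end_io runs exactly
once, however many clones the split produced and in whatever order they
complete.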
> diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
> index c7522ac7e0e71c..ff42b783902140 100644
> --- a/fs/btrfs/bio.c
> +++ b/fs/btrfs/bio.c
> @@ -17,6 +17,7 @@
>  #include "file-item.h"
>  
>  static struct bio_set btrfs_bioset;
> +static struct bio_set btrfs_clone_bioset;
>  static struct bio_set btrfs_repair_bioset;
>  static mempool_t btrfs_failed_bio_pool;
>  
> @@ -38,6 +39,7 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio,
>  	bbio->inode = inode;
>  	bbio->end_io = end_io;
>  	bbio->private = private;
> +	atomic_set(&bbio->pending_ios, 1);
>  }
>  
>  /*
> @@ -75,6 +77,58 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
>  	return bio;
>  }
>  
> +static struct bio *btrfs_split_bio(struct bio *orig, u64 map_length)
> +{
> +	struct btrfs_bio *orig_bbio = btrfs_bio(orig);
> +	struct bio *bio;
> +
> +	bio = bio_split(orig, map_length >> SECTOR_SHIFT, GFP_NOFS,
> +			&btrfs_clone_bioset);
> +	btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode, NULL, orig_bbio);
> +
> +	btrfs_bio(bio)->file_offset = orig_bbio->file_offset;
> +	if (!(orig->bi_opf & REQ_BTRFS_ONE_ORDERED))
> +		orig_bbio->file_offset += map_length;
> +
> +	atomic_inc(&orig_bbio->pending_ios);
> +	return bio;
> +}
> +
> +static void btrfs_orig_write_end_io(struct bio *bio);
> +static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
> +				       struct btrfs_bio *orig_bbio)
> +{
> +	/*
> +	 * For writes btrfs tolerates nr_mirrors - 1 write failures, so we
> +	 * can't just blindly propagate a write failure here.
> +	 * Instead increment the error count in the original I/O context so
> +	 * that it is guaranteed to be larger than the error tolerance.
> +	 */
> +	if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
> +		struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
> +		struct btrfs_io_context *orig_bioc = orig_stripe->bioc;
> +
> +		atomic_add(orig_bioc->max_errors, &orig_bioc->error);
> +	} else {
> +		orig_bbio->bio.bi_status = bbio->bio.bi_status;
> +	}
> +}
> +
> +static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
> +{
> +	if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
> +		struct btrfs_bio *orig_bbio = bbio->private;
> +
> +		if (bbio->bio.bi_status)
> +			btrfs_bbio_propagate_error(bbio, orig_bbio);
> +		bio_put(&bbio->bio);
> +		bbio = orig_bbio;
> +	}
> +
> +	if (atomic_dec_and_test(&bbio->pending_ios))
> +		bbio->end_io(bbio);
> +}
> +
>  static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
>  {
>  	if (cur_mirror == fbio->num_copies)
> @@ -92,7 +146,7 @@ static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
>  static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
>  {
>  	if (atomic_dec_and_test(&fbio->repair_count)) {
> -		fbio->bbio->end_io(fbio->bbio);
> +		btrfs_orig_bbio_end_io(fbio->bbio);
>  		mempool_free(fbio, &btrfs_failed_bio_pool);
>  	}
>  }
> @@ -232,7 +286,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio,
>  	if (unlikely(fbio))
>  		btrfs_repair_done(fbio);
>  	else
> -		bbio->end_io(bbio);
> +		btrfs_orig_bbio_end_io(bbio);
>  }
>  
>  static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
> @@ -286,7 +340,7 @@ static void btrfs_simple_end_io(struct bio *bio)
>  	} else {
>  		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
>  			btrfs_record_physical_zoned(bbio);
> -		bbio->end_io(bbio);
> +		btrfs_orig_bbio_end_io(bbio);
>  	}
>  }
>  
> @@ -300,7 +354,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
>  	if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
>  		btrfs_check_read_bio(bbio, NULL);
>  	else
> -		bbio->end_io(bbio);
> +		btrfs_orig_bbio_end_io(bbio);
>  
>  	btrfs_put_bioc(bioc);
>  }
> @@ -327,7 +381,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
>  	else
>  		bio->bi_status = BLK_STS_OK;
>  
> -	bbio->end_io(bbio);
> +	btrfs_orig_bbio_end_io(bbio);
>  	btrfs_put_bioc(bioc);
>  }
>  
> @@ -492,7 +546,7 @@ static void run_one_async_done(struct btrfs_work *work)
>  
>  	/* If an error occurred we just want to clean up the bio and move on */
>  	if (bio->bi_status) {
> -		btrfs_bio_end_io(async->bbio, bio->bi_status);
> +		btrfs_orig_bbio_end_io(async->bbio);
>  		return;
>  	}
>  
> @@ -567,8 +621,8 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
>  	return true;
>  }
>  
> -void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
> -		      int mirror_num)
> +static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
> +			       int mirror_num)
>  {
>  	struct btrfs_bio *bbio = btrfs_bio(bio);
>  	u64 logical = bio->bi_iter.bi_sector << 9;
> @@ -587,11 +641,10 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>  		goto fail;
>  	}
>  
> +	map_length = min(map_length, length);
>  	if (map_length < length) {
> -		btrfs_crit(fs_info,
> -			   "mapping failed logical %llu bio len %llu len %llu",
> -			   logical, length, map_length);
> -		BUG();
> +		bio = btrfs_split_bio(bio, map_length);
> +		bbio = btrfs_bio(bio);
>  	}
>  
>  	/*
> @@ -602,14 +655,14 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>  		bbio->saved_iter = bio->bi_iter;
>  		ret = btrfs_lookup_bio_sums(bbio);
>  		if (ret)
> -			goto fail;
> +			goto fail_put_bio;
>  	}
>  
>  	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
>  		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
>  			ret = btrfs_extract_ordered_extent(btrfs_bio(bio));
>  			if (ret)
> -				goto fail;
> +				goto fail_put_bio;
>  		}
>  
>  		/*
> @@ -621,20 +674,33 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>  		    !btrfs_is_data_reloc_root(bbio->inode->root)) {
>  			if (should_async_write(bbio) &&
>  			    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
> -				return;
> +				goto done;
>  
>  			ret = btrfs_bio_csum(bbio);
>  			if (ret)
> -				goto fail;
> +				goto fail_put_bio;
>  		}
>  	}
>  
>  	__btrfs_submit_bio(bio, bioc, &smap, mirror_num);
> -	return;
> +done:
> +	return map_length == length;
>  
> +fail_put_bio:
> +	if (map_length < length)
> +		bio_put(bio);

This is causing a panic in btrfs/125: after a split you set bbio to
btrfs_bio() of the split bio, and that clone has a NULL end_io (its
completion is routed through ->private back to the original).  You need
something like the following so that the failure paths end the correct
bbio.  Thanks,

Josef

diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 5d4b67fc44f4..f3a357c48e69 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -607,6 +607,7 @@ static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
 			       int mirror_num)
 {
 	struct btrfs_bio *bbio = btrfs_bio(bio);
+	struct btrfs_bio *orig_bbio = bbio;
 	u64 logical = bio->bi_iter.bi_sector << 9;
 	u64 length = bio->bi_iter.bi_size;
 	u64 map_length = length;
@@ -673,7 +674,7 @@ static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
 		bio_put(bio);
 fail:
 	btrfs_bio_counter_dec(fs_info);
-	btrfs_bio_end_io(bbio, ret);
+	btrfs_bio_end_io(orig_bbio, ret);
 	/* Do not submit another chunk */
 	return true;
 }
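
The fix in miniature, with the same kind of hypothetical stand-in types
as above (plain C, not the kernel structures): btrfs_bio_init() gives
the split clone a NULL end_io and routes its completion through
->private, so an error path that completes bbio after the reassignment
calls a NULL pointer.  Saving orig_bbio before the split gives the
failure paths a bbio that still carries the caller's end_io:

#include <stdio.h>

struct fake_bbio {
	void (*end_io)(struct fake_bbio *);	/* NULL on split clones */
};

static void upper_end_io(struct fake_bbio *bbio)
{
	printf("ending original bbio %p\n", (void *)bbio);
}

int main(void)
{
	struct fake_bbio orig = { .end_io = upper_end_io };
	struct fake_bbio clone = { .end_io = NULL };	/* as the split creates it */
	struct fake_bbio *bbio = &orig;
	struct fake_bbio *orig_bbio = bbio;	/* the fix: saved pre-split */

	bbio = &clone;		/* after the split, bbio aliases the clone */

	/* Pre-fix failure path was effectively bbio->end_io(bbio):
	 * a NULL function call -- the btrfs/125 panic. */
	if (!bbio->end_io)
		puts("clone has no end_io; completing it here would crash");

	/* Post-fix failure path completes the saved original instead. */
	orig_bbio->end_io(orig_bbio);
	return 0;
}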


