Re: [PATCH 26/40] btrfs: refactor btrfs_map_bio

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





On 2022/3/22 23:55, Christoph Hellwig wrote:
Use a label for common cleanup, untangle the conditionals for parity
RAID and move all per-stripe handling into submit_stripe_bio.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
  fs/btrfs/volumes.c | 88 ++++++++++++++++++++++------------------------
  1 file changed, 42 insertions(+), 46 deletions(-)

diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9a1eb1166d72f..1cf0914b33847 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6744,10 +6744,30 @@ static void btrfs_end_bio(struct bio *bio)
  		btrfs_end_bioc(bioc, true);
  }

-static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
-			      u64 physical, struct btrfs_device *dev)
+static void submit_stripe_bio(struct btrfs_io_context *bioc,
+		struct bio *orig_bio, int dev_nr, bool clone)
  {
  	struct btrfs_fs_info *fs_info = bioc->fs_info;
+	struct btrfs_device *dev = bioc->stripes[dev_nr].dev;
+	u64 physical = bioc->stripes[dev_nr].physical;
+	struct bio *bio;
+
+	if (!dev || !dev->bdev ||
+	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
+	    (btrfs_op(orig_bio) == BTRFS_MAP_WRITE &&
+	     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
+		atomic_inc(&bioc->error);
+		if (atomic_dec_and_test(&bioc->stripes_pending))
+			btrfs_end_bioc(bioc, false);
+		return;
+	}
+
+	if (clone) {
+		bio = btrfs_bio_clone(dev->bdev, orig_bio);
+	} else {
+		bio = orig_bio;
+		bio_set_dev(bio, dev->bdev);
+	}

  	bio->bi_private = bioc;
  	btrfs_bio(bio)->device = dev;
@@ -6782,46 +6802,40 @@ static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
  blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
  			   int mirror_num)
  {
-	struct btrfs_device *dev;
-	struct bio *first_bio = bio;
  	u64 logical = bio->bi_iter.bi_sector << 9;
-	u64 length = 0;
-	u64 map_length;
+	u64 length = bio->bi_iter.bi_size;
+	u64 map_length = length;
  	int ret;
  	int dev_nr;
  	int total_devs;
  	struct btrfs_io_context *bioc = NULL;

-	length = bio->bi_iter.bi_size;
-	map_length = length;
-
  	btrfs_bio_counter_inc_blocked(fs_info);
  	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
  				&map_length, &bioc, mirror_num, 1);
-	if (ret) {
-		btrfs_bio_counter_dec(fs_info);
-		return errno_to_blk_status(ret);
-	}
+	if (ret)
+		goto out_dec;

  	total_devs = bioc->num_stripes;
-	bioc->orig_bio = first_bio;
-	bioc->private = first_bio->bi_private;
-	bioc->end_io = first_bio->bi_end_io;
+	bioc->orig_bio = bio;
+	bioc->private = bio->bi_private;
+	bioc->end_io = bio->bi_end_io;
  	atomic_set(&bioc->stripes_pending, bioc->num_stripes);

-	if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-	    ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
-		/* In this case, map_length has been set to the length of
-		   a single stripe; not the whole write */
+	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+		/*
+		 * In this case, map_length has been set to the length of a
+		 * single stripe; not the whole write.
+		 */
  		if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
  			ret = raid56_parity_write(bio, bioc, map_length);
-		} else {
+			goto out_dec;
+		}
+		if (mirror_num > 1) {
  			ret = raid56_parity_recover(bio, bioc, map_length,
  						    mirror_num, 1);
+			goto out_dec;
  		}
-
-		btrfs_bio_counter_dec(fs_info);
-		return errno_to_blk_status(ret);

Can we add some extra comments on the fall-through cases?

The read path still needs to go through the regular routine.

Otherwise looks good to me.

Thanks,
Qu

  	}

  	if (map_length < length) {
@@ -6831,29 +6845,11 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
  		BUG();
  	}

-	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
-		dev = bioc->stripes[dev_nr].dev;
-		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
-						   &dev->dev_state) ||
-		    (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
-		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
-			atomic_inc(&bioc->error);
-			if (atomic_dec_and_test(&bioc->stripes_pending))
-				btrfs_end_bioc(bioc, false);
-			continue;
-		}
-
-		if (dev_nr < total_devs - 1) {
-			bio = btrfs_bio_clone(dev->bdev, first_bio);
-		} else {
-			bio = first_bio;
-			bio_set_dev(bio, dev->bdev);
-		}
-
-		submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
-	}
+	for (dev_nr = 0; dev_nr < total_devs; dev_nr++)
+		submit_stripe_bio(bioc, bio, dev_nr, dev_nr < total_devs - 1);
+out_dec:
  	btrfs_bio_counter_dec(fs_info);
-	return BLK_STS_OK;
+	return errno_to_blk_status(ret);
  }

  static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,




[Index of Archives]     [Linux Ext4 Filesystem]     [Union Filesystem]     [Filesystem Testing]     [Ceph Users]     [Ecryptfs]     [NTFS 3]     [AutoFS]     [Kernel Newbies]     [Share Photos]     [Security]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux Cachefs]     [Reiser Filesystem]     [Linux RAID]     [NTFS 3]     [Samba]     [Device Mapper]     [CEPH Development]

  Powered by Linux