[PATCH v2 1/3] md: factor out repeated sb alignment logic

super_1_load() and super_1_sync() both contain a copy of the logic
that pads the superblock size out to a logical block boundary.  Factor
it into a new helper and use round_up() rather than an explicit
bitmask-based calculation.

Signed-off-by: Christopher Unkel <cunkel@xxxxxxxxxxxxxx>
---
This series replaces the first patch of the previous series
(https://lkml.org/lkml/2020/10/22/1058), with the following changes:

1. Creates a helper function super_1_sb_length_ok().
2. Fixes operator placement style violation.
3. Covers the case in super_1_sync().
4. Refactors duplicate logic.
5. Covers a case in existing code where aligned superblock could
   run into bitmap.
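
For reference only (not part of the patch): a minimal stand-alone
sketch, with hypothetical values, showing that round_up() gives the
same result as the bitmask padding it replaces whenever the alignment
is a power of two, as a device's logical block size always is.

/* Userspace sketch; round_up() here mirrors the kernel macro for
 * power-of-two alignments.
 */
#include <assert.h>
#include <stdio.h>

#define round_up(x, a)	((((x) - 1) | ((a) - 1)) + 1)

int main(void)
{
	int max_dev = 384;	/* hypothetical number of devices */
	int lbs = 4096;		/* hypothetical logical block size */

	/* Open-coded form removed from super_1_load()/super_1_sync(). */
	int sb_size = max_dev * 2 + 256;
	int bmask = lbs - 1;

	if (sb_size & bmask)
		sb_size = (sb_size | bmask) + 1;

	/* Form now used by super_1_set_rdev_sb_size(). */
	assert(sb_size == round_up(max_dev * 2 + 256, lbs));
	printf("sb_size = %d\n", sb_size);
	return 0;
}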

 drivers/md/md.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 98bac4f304ae..d6a55ca1d52e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1646,6 +1646,17 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
 	return cpu_to_le32(csum);
 }
 
+/*
+ * set rdev->sb_size to that required for number of devices in array
+ * with appropriate padding to underlying sectors
+ */
+static void
+super_1_set_rdev_sb_size(struct md_rdev *rdev, int max_dev)
+{
+	int sb_size = max_dev * 2 + 256;
+	rdev->sb_size = round_up(sb_size, bdev_logical_block_size(rdev->bdev));
+}
+
 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
 {
 	struct mdp_superblock_1 *sb;
@@ -1653,7 +1664,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 	sector_t sb_start;
 	sector_t sectors;
 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
-	int bmask;
 	bool spare_disk = true;
 
 	/*
@@ -1720,10 +1730,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
-	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
-	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
-	if (rdev->sb_size & bmask)
-		rdev->sb_size = (rdev->sb_size | bmask) + 1;
+	super_1_set_rdev_sb_size(rdev, le32_to_cpu(sb->max_dev));
 
 	if (minor_version
 	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
@@ -2132,12 +2139,8 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
 			max_dev = rdev2->desc_nr+1;
 
 	if (max_dev > le32_to_cpu(sb->max_dev)) {
-		int bmask;
 		sb->max_dev = cpu_to_le32(max_dev);
-		rdev->sb_size = max_dev * 2 + 256;
-		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
-		if (rdev->sb_size & bmask)
-			rdev->sb_size = (rdev->sb_size | bmask) + 1;
+		super_1_set_rdev_sb_size(rdev, max_dev);
 	} else
 		max_dev = le32_to_cpu(sb->max_dev);
 
-- 
2.17.1
