[PATCH 8/8] md/raid1: introduce wait_for_serialization

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Guoqing Jiang <guoqing.jiang@xxxxxxxxxxxxxxx>

Previously, we called check_and_add_serial when serialization was
enabled for write IO, but it could allocate and free memory
back and forth.

Now, let's just get an element from the memory pool with the new
function, then insert the node into the rb tree if no collision
happens.

Signed-off-by: Guoqing Jiang <guoqing.jiang@xxxxxxxxxxxxxxx>
---
 drivers/md/raid1.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 08568edc1c5d..b1760930c73a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -158,33 +158,41 @@ find_overlap(struct rb_root *root, sector_t sector, unsigned long size)
 	return overlap;
 }
 
-static int check_and_add_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+				struct serial_info *wi, int idx)
 {
-	struct serial_info *wi;
 	unsigned long flags;
 	int ret = 0;
-	unsigned long size = (hi - lo) << 9;
-	struct mddev *mddev = rdev->mddev;
-	int idx = sector_to_idx(lo);
-
-	wi = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+	sector_t lo = r1_bio->sector;
+	unsigned int size = r1_bio->sectors << 9;
 
 	spin_lock_irqsave(&rdev->serial_rb_lock[idx], flags);
 	/* collision happened */
 	if (find_overlap(&rdev->serial_rb[idx], lo, size))
 		ret = -EBUSY;
-
-	if (!ret) {
+	else {
 		wi->sector = lo;
 		wi->size = size;
 		insert_interval(&rdev->serial_rb[idx], wi);
-	} else
-		mempool_free(wi, mddev->serial_info_pool);
+	}
 	spin_unlock_irqrestore(&rdev->serial_rb_lock[idx], flags);
 
 	return ret;
 }
 
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+	struct mddev *mddev = rdev->mddev;
+	struct serial_info *wi;
+	int idx = sector_to_idx(r1_bio->sector);
+
+	if (WARN_ON(!mddev->serial_info_pool))
+		return;
+	wi = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+	wait_event(rdev->serial_io_wait[idx],
+		   check_and_add_serial(rdev, r1_bio, wi, idx) == 0);
+}
+
 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
 {
 	struct serial_info *wi;
@@ -1439,8 +1447,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
 	int max_sectors;
-	sector_t lo, hi;
-	int idx = sector_to_idx(bio->bi_iter.bi_sector);
 
 	if (mddev_is_clustered(mddev) &&
 	     md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1468,8 +1474,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
-	lo = r1_bio->sector;
-	hi = r1_bio->sector + r1_bio->sectors;
 
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
@@ -1614,14 +1618,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 		if (r1_bio->behind_master_bio) {
 			if (test_bit(CollisionCheck, &rdev->flags))
-				wait_event(rdev->serial_io_wait[idx],
-					   check_and_add_serial(rdev, lo, hi)
-					   == 0);
+				wait_for_serialization(rdev, r1_bio);
 			if (test_bit(WriteMostly, &rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
 		} else if (mddev->serialize_policy)
-			wait_event(rdev->serial_io_wait[idx],
-				   check_and_add_serial(rdev, lo, hi) == 0);
+			wait_for_serialization(rdev, r1_bio);
 
 		r1_bio->bios[i] = mbio;
 
-- 
2.17.1




[Index of Archives]     [Linux RAID Wiki]     [ATA RAID]     [Linux SCSI Target Infrastructure]     [Linux Block]     [Linux IDE]     [Linux SCSI]     [Linux Hams]     [Device Mapper]     [Device Mapper Cryptographics]     [Kernel]     [Linux Admin]     [Linux Net]     [GFS]     [RPM]     [git]     [Yosemite Forum]


  Powered by Linux