[MD PATCH 1/1] Use a new variable to count in-flight sync requests

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



In the new barrier code, raise_barrier() waits if conf->nr_pending[idx] is not
zero. After all the conditions become true, the resync request can go on to be
handled. But it increments conf->nr_pending[idx] again. Then the next resync
request that hits the same bucket idx needs to wait for the resync request that
was submitted before it. The performance of resync/recovery is degraded.
So we should use a new variable to count sync requests which are in flight.

Suggested-by: Shaohua Li <shli@xxxxxxxxxx>
Suggested-by: Coly Li <colyli@xxxxxxx>
Signed-off-by: Xiao Ni <xni@xxxxxxxxxx>
---
 drivers/md/raid1.c | 14 +++++++++++---
 drivers/md/raid1.h |  1 +
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a34f587..3c304ef 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -869,7 +869,7 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
 			    conf->resync_lock);
 
-	atomic_inc(&conf->nr_pending[idx]);
+	atomic_inc(&conf->nr_sync_pending[idx]);
 	spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -880,7 +880,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
 
 	atomic_dec(&conf->barrier[idx]);
-	atomic_dec(&conf->nr_pending[idx]);
+	atomic_dec(&conf->nr_sync_pending[idx]);
 	wake_up(&conf->wait_barrier);
 }
 
@@ -1018,7 +1018,8 @@ static int get_unqueued_pending(struct r1conf *conf)
 	int idx, ret;
 
 	for (ret = 0, idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
-		ret += atomic_read(&conf->nr_pending[idx]) -
+		ret += atomic_read(&conf->nr_pending[idx]) +
+			atomic_read(&conf->nr_sync_pending[idx]) -
 			atomic_read(&conf->nr_queued[idx]);
 
 	return ret;
@@ -3024,6 +3025,11 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->nr_pending)
 		goto abort;
 
+	conf->nr_sync_pending = kcalloc(BARRIER_BUCKETS_NR,
+				   sizeof(atomic_t), GFP_KERNEL);
+	if (!conf->nr_sync_pending)
+		goto abort;
+
 	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
 				   sizeof(atomic_t), GFP_KERNEL);
 	if (!conf->nr_waiting)
@@ -3136,6 +3142,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf->poolinfo);
+		kfree(conf->nr_sync_pending);
 		kfree(conf->nr_pending);
 		kfree(conf->nr_waiting);
 		kfree(conf->nr_queued);
@@ -3241,6 +3248,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
+	kfree(conf->nr_sync_pending);
 	kfree(conf->nr_pending);
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index dd22a37..a3580ee 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -85,6 +85,7 @@ struct r1conf {
 	wait_queue_head_t	wait_barrier;
 	spinlock_t		resync_lock;
 	atomic_t		*nr_pending;
+	atomic_t		*nr_sync_pending;
 	atomic_t		*nr_waiting;
 	atomic_t		*nr_queued;
 	atomic_t		*barrier;
-- 
2.7.4

--
To unsubscribe from this list: send the line "unsubscribe linux-raid" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html



[Index of Archives]     [Linux RAID Wiki]     [ATA RAID]     [Linux SCSI Target Infrastructure]     [Linux Block]     [Linux IDE]     [Linux SCSI]     [Linux Hams]     [Device Mapper]     [Device Mapper Cryptographics]     [Kernel]     [Linux Admin]     [Linux Net]     [GFS]     [RPM]     [git]     [Yosemite Forum]


  Powered by Linux