[RFC PATCH] md: add io statistics

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Currently, md does not implement 'inflight' for /sys/block/md.
Although we can view 'inflight' for each device in a raid array,
that information is not sufficient in some situations.
For example, IOs are completed by raid devices and returned to
md for final processing. If these IOs are then blocked, we cannot
detect this by just viewing 'inflight' of the raid devices, which is "0 0".

We implement statistics by adding two new functions, md_io_acct_start()
and md_io_acct_end(). When a bio is issued to a raid device, we account
for it by calling md_io_acct_start(). And md_io_acct_end() is called when
the bio is freed or reset.

Any suggestion?

Signed-off-by: Yufen Yu <yuyufen@xxxxxxxxxx>
---
 drivers/md/md.c     | 28 ++++++++++++++++++++++------
 drivers/md/md.h     |  3 +++
 drivers/md/raid1.c  | 23 +++++++++++++++++------
 drivers/md/raid10.c | 25 ++++++++++++++++++-------
 drivers/md/raid5.c  | 14 +++++++++++---
 5 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index dc2b6a5bfbe4..39aea103fd17 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -281,6 +281,28 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
 	return true;
 }
 
+void md_io_acct_start(struct mddev *mddev, int rw, struct bio *bio) {
+	if (!mddev->gendisk)
+		return;
+
+	generic_start_io_acct(mddev->queue, rw, bio_sectors(bio), &mddev->gendisk->part0);
+}
+
+void md_io_acct_end(struct mddev *mddev, int rw) {
+	int cpu;
+	struct hd_struct *part;
+
+	if (!mddev->gendisk)
+		return;
+
+	cpu = part_stat_lock();
+	part = &mddev->gendisk->part0;
+	part_round_stats(mddev->queue, cpu, part);
+	part_dec_in_flight(mddev->queue, part, rw);
+
+	part_stat_unlock();
+}
+
 void md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
@@ -317,7 +339,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
 	unsigned int sectors;
-	int cpu;
 
 	blk_queue_split(q, &bio);
 
@@ -342,11 +363,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 
 	md_handle_request(mddev, bio);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
-	part_stat_unlock();
-
 	return BLK_QC_T_NONE;
 }
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index fbc925cce810..45659f88db96 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -714,6 +714,9 @@ extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
 
+extern void md_io_acct_start(struct mddev *mddev, int rw, struct bio *bio);
+extern void md_io_acct_end(struct mddev *mddev, int rw);
+
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {
 	int faulty = test_bit(Faulty, &rdev->flags);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fe872dc6712e..f9f1d80f213f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -207,11 +207,14 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
 {
 	int i;
+	struct mddev *mddev= r1_bio->mddev;
 
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct bio **bio = r1_bio->bios + i;
-		if (!BIO_SPECIAL(*bio))
+		if (!BIO_SPECIAL(*bio)) {
+			md_io_acct_end(mddev, bio_data_dir(*bio));
 			bio_put(*bio);
+		}
 		*bio = NULL;
 	}
 }
@@ -417,6 +420,7 @@ static void raid1_end_write_request(struct bio *bio)
 	int mirror = find_bio_disk(r1_bio, bio);
 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 	bool discard_error;
+	struct mddev *mddev = r1_bio->mddev;
 
 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
@@ -516,8 +520,10 @@ static void raid1_end_write_request(struct bio *bio)
 	 */
 	r1_bio_write_done(r1_bio);
 
-	if (to_put)
+	if (to_put) {
+		md_io_acct_end(mddev, WRITE);
 		bio_put(to_put);
+	}
 }
 
 static sector_t align_to_barrier_unit_end(sector_t start_sector,
@@ -1280,9 +1286,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	        read_bio->bi_opf |= MD_FAILFAST;
 	read_bio->bi_private = r1_bio;
 
-	if (mddev->gendisk)
-	        trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
-				disk_devt(mddev->gendisk), r1_bio->sector);
+	if (mddev->gendisk) {
+		md_io_acct_start(mddev, READ, read_bio);
+		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
+			disk_devt(mddev->gendisk), r1_bio->sector);
+	}
 
 	generic_make_request(read_bio);
 }
@@ -1491,10 +1499,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 		atomic_inc(&r1_bio->remaining);
 
-		if (mddev->gendisk)
+		if (mddev->gendisk) {
+			md_io_acct_start(mddev, WRITE, mbio);
 			trace_block_bio_remap(mbio->bi_disk->queue,
 					      mbio, disk_devt(mddev->gendisk),
 					      r1_bio->sector);
+		}
 		/* flush_pending_writes() needs access to the rdev so...*/
 		mbio->bi_disk = (void *)conf->mirrors[i].rdev;
 
@@ -2452,6 +2462,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 
 	bio = r1_bio->bios[r1_bio->read_disk];
 	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
+	md_io_acct_end(mddev, READ);
 	bio_put(bio);
 	r1_bio->bios[r1_bio->read_disk] = NULL;
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c5e6c60fc0d4..e1ae0d6818d1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -273,11 +273,14 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 {
 	int i;
+	struct mddev *mddev = r10_bio->mddev;
 
 	for (i = 0; i < conf->copies; i++) {
 		struct bio **bio = & r10_bio->devs[i].bio;
-		if (!BIO_SPECIAL(*bio))
+		if (!BIO_SPECIAL(*bio)) {
+			md_io_acct_end(mddev, bio_data_dir(*bio));
 			bio_put(*bio);
+		}
 		*bio = NULL;
 		bio = &r10_bio->devs[i].repl_bio;
 		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
@@ -470,6 +473,7 @@ static void raid10_end_write_request(struct bio *bio)
 	struct md_rdev *rdev = NULL;
 	struct bio *to_put = NULL;
 	bool discard_error;
+	struct mddev *mddev = r10_bio->mddev;
 
 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
@@ -563,8 +567,10 @@ static void raid10_end_write_request(struct bio *bio)
 	one_write_done(r10_bio);
 	if (dec_rdev)
 		rdev_dec_pending(rdev, conf->mddev);
-	if (to_put)
+	if (to_put) {
+		md_io_acct_end(mddev, WRITE);
 		bio_put(to_put);
+	}
 }
 
 /*
@@ -1228,10 +1234,12 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	        read_bio->bi_opf |= MD_FAILFAST;
 	read_bio->bi_private = r10_bio;
 
-	if (mddev->gendisk)
-	        trace_block_bio_remap(read_bio->bi_disk->queue,
-	                              read_bio, disk_devt(mddev->gendisk),
-	                              r10_bio->sector);
+	if (mddev->gendisk) {
+		md_io_acct_start(mddev, READ, read_bio);
+		trace_block_bio_remap(read_bio->bi_disk->queue,
+							  read_bio, disk_devt(mddev->gendisk),
+							  r10_bio->sector);
+	}
 	generic_make_request(read_bio);
 	return;
 }
@@ -1278,10 +1286,12 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 		mbio->bi_opf |= MD_FAILFAST;
 	mbio->bi_private = r10_bio;
 
-	if (conf->mddev->gendisk)
+	if (conf->mddev->gendisk) {
+		md_io_acct_start(mddev, WRITE, mbio);
 		trace_block_bio_remap(mbio->bi_disk->queue,
 				      mbio, disk_devt(conf->mddev->gendisk),
 				      r10_bio->sector);
+	}
 	/* flush_pending_writes() needs access to the rdev so...*/
 	mbio->bi_disk = (void *)rdev;
 
@@ -2621,6 +2631,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 	 * frozen.
 	 */
 	bio = r10_bio->devs[slot].bio;
+	md_io_acct_end(mddev, READ);
 	bio_put(bio);
 	r10_bio->devs[slot].bio = NULL;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b5d2601483e3..1fb6395ad08f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1148,10 +1148,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
-			if (conf->mddev->gendisk)
+			if (conf->mddev->gendisk) {
+				md_io_acct_start(conf->mddev, bio_data_dir(bi), bi);
 				trace_block_bio_remap(bi->bi_disk->queue,
 						      bi, disk_devt(conf->mddev->gendisk),
 						      sh->dev[i].sector);
+			}
 			if (should_defer && op_is_write(op))
 				bio_list_add(&pending_bios, bi);
 			else
@@ -1196,10 +1198,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			 */
 			if (op == REQ_OP_DISCARD)
 				rbi->bi_vcnt = 0;
-			if (conf->mddev->gendisk)
+			if (conf->mddev->gendisk) {
+				md_io_acct_start(conf->mddev, bio_data_dir(rbi), rbi);
 				trace_block_bio_remap(rbi->bi_disk->queue,
 						      rbi, disk_devt(conf->mddev->gendisk),
 						      sh->dev[i].sector);
+			}
 			if (should_defer && op_is_write(op))
 				bio_list_add(&pending_bios, rbi);
 			else
@@ -2585,6 +2589,7 @@ static void raid5_end_read_request(struct bio * bi)
 		}
 	}
 	rdev_dec_pending(rdev, conf->mddev);
+	md_io_acct_end(conf->mddev, READ);
 	bio_reset(bi);
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
@@ -2660,6 +2665,7 @@ static void raid5_end_write_request(struct bio *bi)
 	if (sh->batch_head && bi->bi_status && !replacement)
 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
+	md_io_acct_end(conf->mddev, WRITE);
 	bio_reset(bi);
 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -5255,10 +5261,12 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
-		if (mddev->gendisk)
+		if (mddev->gendisk) {
+			md_io_acct_start(mddev, READ, align_bi);
 			trace_block_bio_remap(align_bi->bi_disk->queue,
 					      align_bi, disk_devt(mddev->gendisk),
 					      raid_bio->bi_iter.bi_sector);
+		}
 		generic_make_request(align_bi);
 		return 1;
 	} else {
-- 
2.13.6

--
To unsubscribe from this list: send the line "unsubscribe linux-raid" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html



[Index of Archives]     [Linux RAID Wiki]     [ATA RAID]     [Linux SCSI Target Infrastructure]     [Linux Block]     [Linux IDE]     [Linux SCSI]     [Linux Hams]     [Device Mapper]     [Device Mapper Cryptographics]     [Kernel]     [Linux Admin]     [Linux Net]     [GFS]     [RPM]     [git]     [Yosemite Forum]


  Powered by Linux