With the current debugfs block layer infrastructure, we only get the total merge count, which includes all the request types, but we don't get the per-request-type merge count. This patch replaces the rq_merged variable with an rq_merged array so that we can track per-request-type merge stats. Instead of having one number for all merged requests, with this patch we can get a detailed count of merged requests per request type:- READ 0 WRITE 0 FLUSH 0 DISCARD 0 SECURE_ERASE 0 ZONE_RESET 0 ZONE_RESET_ALL 0 WRITE_ZEROES 0 SCSI_IN 0 SCSI_OUT 0 DRV_IN 0 DRV_OUT 0 This is helpful in understanding the merging of requests under different workloads, and for special requests such as discard, which implement a request-specific merging mechanism. Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@xxxxxxx> --- block/blk-mq-debugfs.c | 17 +++++++++++++++-- block/blk-mq-sched.c | 2 +- block/blk-mq.h | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index b3f2ba483992..1e46f2cbf84e 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -679,8 +679,21 @@ static ssize_t ctx_dispatched_write(void *data, const char __user *buf, static int ctx_merged_show(void *data, struct seq_file *m) { struct blk_mq_ctx *ctx = data; + unsigned long *rm = ctx->rq_merged; + + seq_printf(m, "READ %8lu\n", rm[REQ_OP_READ]); + seq_printf(m, "WRITE %8lu\n", rm[REQ_OP_WRITE]); + seq_printf(m, "FLUSH %8lu\n", rm[REQ_OP_FLUSH]); + seq_printf(m, "DISCARD %8lu\n", rm[REQ_OP_DISCARD]); + seq_printf(m, "SECURE_ERASE %8lu\n", rm[REQ_OP_SECURE_ERASE]); + seq_printf(m, "ZONE_RESET %8lu\n", rm[REQ_OP_ZONE_RESET]); + seq_printf(m, "ZONE_RESET_ALL %8lu\n", rm[REQ_OP_ZONE_RESET_ALL]); + seq_printf(m, "WRITE_ZEROES %8lu\n", rm[REQ_OP_WRITE_ZEROES]); + seq_printf(m, "SCSI_IN %8lu\n", rm[REQ_OP_SCSI_IN]); + seq_printf(m, "SCSI_OUT %8lu\n", rm[REQ_OP_SCSI_OUT]); + seq_printf(m, "DRV_IN %8lu\n", rm[REQ_OP_DRV_IN]); 
+ seq_printf(m, "DRV_OUT %8lu\n", rm[REQ_OP_DRV_OUT]); - seq_printf(m, "%lu\n", ctx->rq_merged); return 0; } @@ -689,7 +702,7 @@ static ssize_t ctx_merged_write(void *data, const char __user *buf, { struct blk_mq_ctx *ctx = data; - ctx->rq_merged = 0; + memset(ctx->rq_merged, 0, sizeof(ctx->rq_merged)); return count; } diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index c9d183d6c499..664f8a056e96 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -314,7 +314,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q, lockdep_assert_held(&ctx->lock); if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { - ctx->rq_merged++; + ctx->rq_merged[bio_op(bio)]++; return true; } diff --git a/block/blk-mq.h b/block/blk-mq.h index 32c62c64e6c2..d485dde6e090 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -27,7 +27,7 @@ struct blk_mq_ctx { /* incremented at dispatch time */ unsigned long rq_dispatched[2]; - unsigned long rq_merged; + unsigned long rq_merged[REQ_OP_LAST]; /* incremented at completion time */ unsigned long ____cacheline_aligned_in_smp rq_completed[2]; -- 2.17.0