On 6/17/20 1:26 PM, Kashyap Desai wrote:
->queued is only ever increased and never decreased; so far it exists just
for debug purposes, so it can't be relied on for this.
Thanks. I overlooked that it is only an incremental counter.
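For reference, the counter in question (from include/linux/blk-mq.h):

	/** @queued: Number of queued requests. */
	unsigned long queued;

As far as I can tell it is only ever incremented on the request allocation
path and read back through debugfs, so there is no decrement to pair it
with.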
One approach is to add a similar counter and maintain it from the
scheduler's insert/dispatch callbacks.

I tried the change below and performance is in the expected range.
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index fdcc2c1..ea201d0 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -485,6 +485,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		list_add(&rq->queuelist, &list);
 		e->type->ops.insert_requests(hctx, &list, at_head);
+		atomic_inc(&hctx->elevator_queued);
 	} else {
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
@@ -511,8 +512,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	percpu_ref_get(&q->q_usage_counter);
 
 	e = hctx->queue->elevator;
-	if (e && e->type->ops.insert_requests)
+	if (e && e->type->ops.insert_requests) {
 		e->type->ops.insert_requests(hctx, list, false);
-	else {
+		atomic_inc(&hctx->elevator_queued);
+	} else {
 		/*
 		 * try to issue requests directly if the hw queue isn't
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 126021f..946b47a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -74,6 +74,13 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_queue *e = hctx->queue->elevator;
 
+	/* If the current hctx has not queued any request, there is no need
+	 * to run it; blk_mq_run_hw_queue() on an hctx which has queued IO
+	 * will take care of running that specific hctx.
+	 */
+	if (!atomic_read(&hctx->elevator_queued))
+		return false;
+
 	if (e && e->type->ops.has_work)
 		return e->type->ops.has_work(hctx);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f73a2f9..48f1824 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -517,8 +517,10 @@ void blk_mq_free_request(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
-		if (e && e->type->ops.finish_request)
+		if (e && e->type->ops.finish_request) {
 			e->type->ops.finish_request(rq);
+			atomic_dec(&hctx->elevator_queued);
+		}
 		if (rq->elv.icq) {
 			put_io_context(rq->elv.icq->ioc);
 			rq->elv.icq = NULL;
@@ -2571,6 +2573,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 		goto free_hctx;
 
 	atomic_set(&hctx->nr_active, 0);
+	atomic_set(&hctx->elevator_queued, 0);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 	hctx->numa_node = node;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 66711c7..ea1ddb1 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -139,6 +139,10 @@ struct blk_mq_hw_ctx {
 	 * shared across request queues.
 	 */
 	atomic_t nr_active;
+	/**
+	 * @elevator_queued: Number of queued requests on hctx.
+	 */
+	atomic_t elevator_queued;
 
 	/** @cpuhp_online: List to store request if CPU is going to die */
 	struct hlist_node cpuhp_online;
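For context on why this check pays off: blk_mq_run_hw_queues() kicks every
hctx of a queue, and each blk_mq_run_hw_queue() call ends up polling the
scheduler through blk_mq_hctx_has_pending(), which at the time looked
roughly like this (simplified from block/blk-mq.c; not part of the patch):

static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	/* dispatch list, per-ctx sw queues, then the elevator */
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
		blk_mq_sched_has_work(hctx);
}

With a shared tag set and many hctxs, bailing out of blk_mq_sched_has_work()
early spares the indirect call into the scheduler's ->has_work() for every
idle hctx.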
Would it make sense to move it into the elevator itself?
It's a bit odd having a value 'elevator_queued' with no direct reference
to the elevator...
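Something like the rough, untested sketch below, perhaps. The field name
'queued' is made up here, and since struct elevator_queue hangs off the
request_queue, such a counter would be shared by all hctxs of the queue
rather than being per-hctx:

/* Hypothetical only: keep the counter in the elevator itself. */
struct elevator_queue {
	struct elevator_type *type;
	void *elevator_data;
	/* ... existing fields ... */
	atomic_t queued;	/* requests currently held by the scheduler */
};

static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (!e || !atomic_read(&e->queued))
		return false;

	if (e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}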
Cheers,
Hannes
--
Dr. Hannes Reinecke            Teamlead Storage & Networking
hare@xxxxxxx                   +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer