From: Niklas Cassel <niklas.cassel@xxxxxxx>

Currently, __blk_mq_alloc_request() calls ops.prepare_request and sets
RQF_ELVPRIV. Therefore (if the request is not a flush), the RQF_ELVPRIV
flag will be set for the request in blk_mq_submit_bio(), regardless of
whether the request was submitted to a scheduler or bypassed the
scheduler.

Later, blk_mq_free_request() checks if the RQF_ELVPRIV flag is set; if
it is, the ops.finish_request callback will be called.

The problem with this is that the finish_request scheduler callback
will be called even for requests that bypassed the scheduler.

Fix this by calling the scheduler ops.prepare_request callback, and
setting the RQF_ELVPRIV flag, only immediately before calling the
insert callback. This way, we can reuse the flag, and we don't need to
add any additional checks in blk_mq_sched_requeue_request() or
blk_mq_free_request(), since they already perform the callback only if
RQF_ELVPRIV is set. The existing .prepare_request callbacks should
continue to work without modification.

Signed-off-by: Niklas Cassel <niklas.cassel@xxxxxxx>
---
 block/blk-mq-sched.c   | 20 ++++++++++++++++++++
 block/blk-mq.c         | 13 -------------
 include/linux/blkdev.h |  3 ++-
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0f006cabfd91..eacacb7088c1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -466,6 +466,14 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	if (e) {
 		LIST_HEAD(list);
 
+		rq->elv.icq = NULL;
+		if (e->type->ops.prepare_request) {
+			if (e->type->icq_cache)
+				blk_mq_sched_assign_ioc(rq);
+
+			e->type->ops.prepare_request(rq);
+			rq->rq_flags |= RQF_ELVPRIV;
+		}
 		list_add(&rq->queuelist, &list);
 		e->type->ops.insert_requests(hctx, &list, at_head);
 	} else {
@@ -495,6 +503,18 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 	e = hctx->queue->elevator;
 	if (e) {
+		struct request *rq;
+
+		list_for_each_entry(rq, list, queuelist) {
+			rq->elv.icq = NULL;
+			if (e->type->ops.prepare_request) {
+				if (e->type->icq_cache)
+					blk_mq_sched_assign_ioc(rq);
+
+				e->type->ops.prepare_request(rq);
+				rq->rq_flags |= RQF_ELVPRIV;
+			}
+		}
 		e->type->ops.insert_requests(hctx, list, false);
 	} else {
 		/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9d4fdc2be88a..3527dd9fd10e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -328,19 +328,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
 	refcount_set(&rq->ref, 1);
 
-	if (!op_is_flush(data->cmd_flags)) {
-		struct elevator_queue *e = data->q->elevator;
-
-		rq->elv.icq = NULL;
-		if (e && e->type->ops.prepare_request) {
-			if (e->type->icq_cache)
-				blk_mq_sched_assign_ioc(rq);
-
-			e->type->ops.prepare_request(rq);
-			rq->rq_flags |= RQF_ELVPRIV;
-		}
-	}
-
 	data->hctx->queued++;
 	return rq;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2e12320cb121..a5047c7e9448 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -81,7 +81,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_FAILED		((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
 #define RQF_QUIET		((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
+/* The request has been inserted into an elevator, and thus has
+ * private data attached */
 #define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
--
2.31.1
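
For context, the free path gates the scheduler callback purely on
RQF_ELVPRIV. A simplified sketch of the blk_mq_free_request() logic
described above (unrelated teardown elided; details may vary slightly
by kernel version):

	void blk_mq_free_request(struct request *rq)
	{
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if (rq->rq_flags & RQF_ELVPRIV) {
			/* runs whenever the flag is set, even if rq
			 * never actually reached the scheduler */
			if (e && e->type->ops.finish_request)
				e->type->ops.finish_request(rq);
			if (rq->elv.icq) {
				put_io_context(rq->elv.icq->ioc);
				rq->elv.icq = NULL;
			}
		}
		/* ... tag and refcount teardown elided ... */
	}

With the flag set at allocation time, a request that bypassed the
scheduler still entered this branch; with the flag set at insert time,
the gate is accurate and the free path needs no changes.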
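
The requeue path has the same shape of gate, which is why it needs no
changes either. Sketched from the behavior the commit message relies on
(the exact helper in blk-mq-sched.h may differ slightly):

	static inline void blk_mq_sched_requeue_request(struct request *rq)
	{
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if ((rq->rq_flags & RQF_ELVPRIV) && e &&
		    e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}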
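
One possible follow-up, not part of this patch: the prepare logic is
now duplicated in blk_mq_sched_insert_request() and
blk_mq_sched_insert_requests(). It could be factored into a small
shared helper; the name blk_mq_sched_prepare_request() below is
hypothetical, and the body is just the patch's own block lifted out:

	static void blk_mq_sched_prepare_request(struct elevator_queue *e,
						 struct request *rq)
	{
		rq->elv.icq = NULL;
		if (e->type->ops.prepare_request) {
			if (e->type->icq_cache)
				blk_mq_sched_assign_ioc(rq);

			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

Both insert paths would then call this immediately before
e->type->ops.insert_requests().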