We need to improve the logic here a bit, most importantly ensuring that
the request matches the current queue. If it doesn't, we cannot use it
and must fall back to normal request alloc.

Fixes: 47c122e35d7e ("block: pre-allocate requests if plug is started and is a batch")
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 block/blk-mq.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4bc98c7264fa..e92c36f2326a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2485,6 +2485,24 @@ static inline bool blk_mq_queue_enter(struct request_queue *q, struct bio *bio)
 	return true;
 }
 
+static inline struct request *blk_get_plug_request(struct request_queue *q,
+						   struct blk_plug *plug,
+						   struct bio *bio)
+{
+	struct request *rq;
+
+	if (plug && !rq_list_empty(plug->cached_rq)) {
+		rq = rq_list_peek(&plug->cached_rq);
+		if (rq->q == q) {
+			rq_qos_throttle(q, bio);
+			plug->cached_rq = rq_list_next(rq);
+			INIT_LIST_HEAD(&rq->queuelist);
+			return rq;
+		}
+	}
+	return NULL;
+}
+
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2523,11 +2541,8 @@ void blk_mq_submit_bio(struct bio *bio)
 	}
 
 	plug = blk_mq_plug(q, bio);
-	if (plug && plug->cached_rq) {
-		rq = rq_list_pop(&plug->cached_rq);
-		INIT_LIST_HEAD(&rq->queuelist);
-		rq_qos_throttle(q, bio);
-	} else {
+	rq = blk_get_plug_request(q, plug, bio);
+	if (!rq) {
 		struct blk_mq_alloc_data data = {
 			.q		= q,
 			.nr_tags	= 1,
-- 
2.33.1