This is to protect the rqos list against rqos open/close. We need to
drain all of the callers of blk_mq_submit_bio() before we can operate
on the rqos list.

Signed-off-by: Wang Jianchao (Kuaishou) <jianchao.wan9@xxxxxxxxx>
---
 block/blk-mq.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1adfe4824ef5..3c1cd32c72fd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2723,8 +2723,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
+	percpu_ref_get(&q->q_usage_counter);
 
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
@@ -2789,15 +2788,18 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (unlikely(!blk_crypto_bio_prep(&bio)))
 		return;
 
+	if (unlikely(bio_queue_enter(bio)))
+		return;
+
 	blk_queue_bounce(q, &bio);
 	if (blk_may_split(q, bio))
 		__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		return;
+		goto exit;
 
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
-		return;
+		goto exit;
 
 	rq_qos_throttle(q, bio);
 
@@ -2805,7 +2807,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!rq) {
 		rq = blk_mq_get_new_requests(q, plug, bio);
 		if (unlikely(!rq))
-			return;
+			goto exit;
 	}
 
 	trace_block_getrq(bio);
@@ -2819,12 +2821,12 @@ void blk_mq_submit_bio(struct bio *bio)
 		bio->bi_status = ret;
 		bio_endio(bio);
 		blk_mq_free_request(rq);
-		return;
+		goto exit;
 	}
 
 	if (op_is_flush(bio->bi_opf)) {
 		blk_insert_flush(rq);
-		return;
+		goto exit;
 	}
 
 	if (plug)
@@ -2836,6 +2838,8 @@ void blk_mq_submit_bio(struct bio *bio)
 	else
 		blk_mq_run_dispatch_ops(rq->q,
 				blk_mq_try_issue_directly(rq->mq_hctx, rq));
+exit:
+	blk_queue_exit(q);
 }
 
 /**
-- 
2.17.1
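
For context on why this helps: with q_usage_counter now held across all of
blk_mq_submit_bio(), an rqos open/close path can freeze the queue to drain
every in-flight submitter before touching the list. Below is a minimal
sketch of that writer side, assuming a hypothetical helper name
(example_rq_qos_add() is illustrative only and not part of this patch;
blk_mq_freeze_queue()/blk_mq_unfreeze_queue() are the existing block layer
freeze APIs):

/*
 * Sketch only: a hypothetical rqos writer relying on the draining
 * this patch makes possible. blk_mq_freeze_queue() waits for
 * q->q_usage_counter to drop to zero; since every caller of
 * blk_mq_submit_bio() now holds the counter for the whole function,
 * once the freeze returns no submitter can still be walking the
 * rqos list.
 */
static void example_rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	blk_mq_freeze_queue(q);		/* drain all bio submitters */

	/* no blk_mq_submit_bio() caller can observe a half-updated list */
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	blk_mq_unfreeze_queue(q);	/* let new bios enter again */
}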