Retain the old logic for the fops-based submit, but for our internal
blk_mq_submit_bio(), move the queue entering logic into the core function
itself.

We need to be a bit careful when going into the scheduler, as the scheduler
or queue mappings can change arbitrarily before we have entered the queue.
Have the bio scheduler merge path do that separately; it's a very cheap
operation compared to actually doing the merging, locking, and lookups.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 block/blk-core.c     | 14 ++++++--------
 block/blk-mq-sched.c | 13 ++++++++++---
 block/blk-mq.c       | 28 ++++++++++++++++++----------
 3 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index e00f5a2287cc..2b12a427ffa6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -868,18 +868,16 @@ static void __submit_bio(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
-	if (unlikely(bio_queue_enter(bio) != 0))
-		return;
-
 	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-		goto queue_exit;
+		return;
 	if (!disk->fops->submit_bio) {
 		blk_mq_submit_bio(bio);
-		return;
+	} else {
+		if (unlikely(bio_queue_enter(bio) != 0))
+			return;
+		disk->fops->submit_bio(bio);
+		blk_queue_exit(disk->queue);
 	}
-	disk->fops->submit_bio(bio);
-queue_exit:
-	blk_queue_exit(disk->queue);
 }
 
 /*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4a6789e4398b..4be652fa38e7 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge)
-		return e->type->ops.bio_merge(q, bio, nr_segs);
+	if (bio_queue_enter(bio))
+		return false;
+
+	if (e && e->type->ops.bio_merge) {
+		ret = e->type->ops.bio_merge(q, bio, nr_segs);
+		goto out_put;
+	}
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
 	    list_empty_careful(&ctx->rq_lists[type]))
-		return false;
+		goto out_put;
 
 	/* default per sw-queue merge */
 	spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		ret = true;
 
 	spin_unlock(&ctx->lock);
+out_put:
+	blk_queue_exit(q);
 	return ret;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5498454c2164..4bc98c7264fa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2478,6 +2478,13 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 	return BLK_MAX_REQUEST_COUNT;
 }
 
+static inline bool blk_mq_queue_enter(struct request_queue *q, struct bio *bio)
+{
+	if (!blk_try_enter_queue(q, false) && bio_queue_enter(bio))
+		return false;
+	return true;
+}
+
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2506,21 +2513,20 @@ void blk_mq_submit_bio(struct bio *bio)
 	__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		goto queue_exit;
+		return;
 
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
 		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-			goto queue_exit;
+			return;
 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-			goto queue_exit;
+			return;
 	}
 
-	rq_qos_throttle(q, bio);
-
 	plug = blk_mq_plug(q, bio);
 	if (plug && plug->cached_rq) {
 		rq = rq_list_pop(&plug->cached_rq);
 		INIT_LIST_HEAD(&rq->queuelist);
+		rq_qos_throttle(q, bio);
 	} else {
 		struct blk_mq_alloc_data data = {
 			.q		= q,
@@ -2528,6 +2534,11 @@ void blk_mq_submit_bio(struct bio *bio)
 			.cmd_flags	= bio->bi_opf,
 		};
 
+		if (unlikely(!blk_mq_queue_enter(q, bio)))
+			return;
+
+		rq_qos_throttle(q, bio);
+
 		if (plug) {
 			data.nr_tags	= plug->nr_ios;
 			plug->nr_ios	= 1;
@@ -2538,7 +2549,8 @@ void blk_mq_submit_bio(struct bio *bio)
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)
 				bio_wouldblock_error(bio);
-			goto queue_exit;
+			blk_queue_exit(q);
+			return;
 		}
 	}
 
@@ -2621,10 +2633,6 @@ void blk_mq_submit_bio(struct bio *bio)
 		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
-
-	return;
-queue_exit:
-	blk_queue_exit(q);
 }
 
 static size_t order_to_size(unsigned int order)
-- 
2.33.1
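A side note for readers outside the block layer: the change hinges on
entering the queue exactly once per submission path, preferring the cheap
non-blocking try before the slow path, and pairing every successful enter
with an exit. The self-contained userspace sketch below only models that
discipline; mock_queue, queue_try_enter(), queue_enter_slow(), queue_exit()
and submit() are invented stand-ins for q->q_usage_counter,
blk_try_enter_queue(), bio_queue_enter(), blk_queue_exit() and the patched
allocation branch of blk_mq_submit_bio(). It is not kernel code.

/*
 * Model of the enter/exit pairing: cheap non-blocking try first, a
 * potentially blocking slow path only if that fails, and an explicit
 * exit on every path that actually entered.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_queue {
	atomic_int usage;	/* stands in for q->q_usage_counter */
	bool frozen;		/* stands in for a frozen/dying queue */
};

/* Fast path: take a usage reference unless the queue is frozen. */
static bool queue_try_enter(struct mock_queue *q)
{
	if (q->frozen)
		return false;
	atomic_fetch_add(&q->usage, 1);
	return true;
}

/* Slow path: in the kernel this may sleep or fail the bio; here it just retries. */
static bool queue_enter_slow(struct mock_queue *q)
{
	return queue_try_enter(q);
}

static void queue_exit(struct mock_queue *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/* Mirrors the shape of the request allocation branch after the patch. */
static bool submit(struct mock_queue *q, bool alloc_fails)
{
	if (!queue_try_enter(q) && !queue_enter_slow(q))
		return false;		/* never entered, nothing to exit */

	if (alloc_fails) {
		queue_exit(q);		/* matches blk_queue_exit(q) on failure */
		return false;
	}

	/* in the kernel the request holds the reference until completion */
	queue_exit(q);
	return true;
}

int main(void)
{
	struct mock_queue q = { .usage = 0, .frozen = false };

	printf("submit ok: %d\n", submit(&q, false));
	printf("submit with alloc failure: %d\n", submit(&q, true));
	printf("leaked usage refs: %d\n", atomic_load(&q.usage));
	return 0;
}

The point of the two-step enter is that the common case never touches the
slow path, which is why the patch only falls back to bio_queue_enter() when
blk_try_enter_queue() fails.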