[PATCH 4/5] block: move queue enter logic into blk_mq_submit_bio()

Retain the old logic for the fops-based submit path, but for our internal
blk_mq_submit_bio(), move the queue entering logic into the core function
itself.
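
As a rough illustration of the enter logic being moved, here is a
compilable user-space sketch. q_try_enter()/q_enter()/q_exit() and the
atomics-based queue are made-up stand-ins for blk_try_enter_queue(),
bio_queue_enter(), blk_queue_exit() and the real percpu usage counter;
this is the shape of the new blk_mq_queue_enter() helper, not kernel
code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct queue {
		atomic_int  usage;	/* stand-in for the queue usage counter */
		atomic_bool frozen;	/* set while the queue is being drained */
	};

	/* Cheap, non-blocking attempt, akin to blk_try_enter_queue(). */
	static bool q_try_enter(struct queue *q)
	{
		atomic_fetch_add(&q->usage, 1);
		if (!atomic_load(&q->frozen))
			return true;
		atomic_fetch_sub(&q->usage, 1);	/* raced with a freeze, back out */
		return false;
	}

	/* Full enter; the kernel can sleep here, the model just spins. */
	static bool q_enter(struct queue *q)
	{
		while (!q_try_enter(q))
			;
		return true;
	}

	static void q_exit(struct queue *q)
	{
		atomic_fetch_sub(&q->usage, 1);
	}

	/*
	 * Same shape as the blk_mq_queue_enter() helper this patch adds:
	 * try the cheap path first, only fall back to the blocking enter.
	 */
	static bool mq_queue_enter(struct queue *q)
	{
		return q_try_enter(q) || q_enter(q);
	}

	int main(void)
	{
		struct queue q = { 0 };

		if (mq_queue_enter(&q)) {
			printf("entered, usage now %d\n", atomic_load(&q.usage));
			q_exit(&q);
		}
		return 0;
	}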

We need to be a bit careful when going into the scheduler, as the
scheduler or the queue mappings can change arbitrarily before we have
entered the queue. Have the bio scheduler merge path enter the queue
separately; that is a very cheap operation compared to the actual merge
locking and lookups.
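
To illustrate that point, a minimal standalone sketch of the reworked
merge path. queue_enter()/queue_exit() are hypothetical stand-ins for
bio_queue_enter()/blk_queue_exit(), and the real function does the
elevator and ctx/hctx mapping work where the placeholder comment sits:

	#include <stdbool.h>
	#include <stdio.h>

	struct queue { int entered; };

	/* Made-up stand-ins for bio_queue_enter()/blk_queue_exit(). */
	static bool queue_enter(struct queue *q) { q->entered++; return true; }
	static void queue_exit(struct queue *q) { q->entered--; }

	/*
	 * Shape of the reworked blk_mq_sched_bio_merge(): the elevator and
	 * ctx/hctx mapping lookups all happen under one short enter/exit
	 * pair, so the mapping cannot change underneath the merge attempt.
	 */
	static bool sched_bio_merge(struct queue *q)
	{
		bool ret = false;

		if (!queue_enter(q))
			return false;

		/* ... ->bio_merge() or the per-sw-queue merge would run here ... */

		queue_exit(q);
		return ret;
	}

	int main(void)
	{
		struct queue q = { 0 };

		printf("merged=%d, entered=%d\n", sched_bio_merge(&q), q.entered);
		return 0;
	}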

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 block/blk-core.c     | 17 ++++++---------
 block/blk-mq-sched.c | 13 ++++++++---
 block/blk-mq.c       | 51 +++++++++++++++++++++++++++++---------------
 block/blk.h          |  1 +
 4 files changed, 52 insertions(+), 30 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index e00f5a2287cc..18aab7f8469a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -746,7 +746,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 	return BLK_STS_OK;
 }
 
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct request_queue *q = bdev_get_queue(bdev);
@@ -868,18 +868,15 @@ static void __submit_bio(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
-	if (unlikely(bio_queue_enter(bio) != 0))
-		return;
-
-	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-		goto queue_exit;
 	if (!disk->fops->submit_bio) {
 		blk_mq_submit_bio(bio);
-		return;
+	} else {
+		if (unlikely(bio_queue_enter(bio) != 0))
+			return;
+		if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+			disk->fops->submit_bio(bio);
+		blk_queue_exit(disk->queue);
 	}
-	disk->fops->submit_bio(bio);
-queue_exit:
-	blk_queue_exit(disk->queue);
 }
 
 /*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4a6789e4398b..4be652fa38e7 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge)
-		return e->type->ops.bio_merge(q, bio, nr_segs);
+	if (bio_queue_enter(bio))
+		return false;
+
+	if (e && e->type->ops.bio_merge) {
+		ret = e->type->ops.bio_merge(q, bio, nr_segs);
+		goto out_put;
+	}
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
 	    list_empty_careful(&ctx->rq_lists[type]))
-		return false;
+		goto out_put;
 
 	/* default per sw-queue merge */
 	spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		ret = true;
 
 	spin_unlock(&ctx->lock);
+out_put:
+	blk_queue_exit(q);
 	return ret;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f7f36d5ed25a..b0c0eac43eef 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2487,12 +2487,21 @@ static inline struct request *blk_get_plug_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 	rq = rq_list_peek(&plug->cached_rq);
-	if (rq) {
-		plug->cached_rq = rq_list_next(rq);
-		INIT_LIST_HEAD(&rq->queuelist);
-		return rq;
-	}
-	return NULL;
+	if (!rq)
+		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		return ERR_PTR(-EIO);
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
+}
+
+static inline bool blk_mq_queue_enter(struct request_queue *q, struct bio *bio)
+{
+	if (!blk_try_enter_queue(q, false) && bio_queue_enter(bio))
+		return false;
+	return true;
 }
 
 /**
@@ -2518,31 +2527,41 @@ void blk_mq_submit_bio(struct bio *bio)
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return;
+
 	blk_queue_bounce(q, &bio);
 	if (blk_may_split(q, bio))
 		__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		goto queue_exit;
+		return;
 
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
 		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-			goto queue_exit;
+			return;
 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-			goto queue_exit;
+			return;
 	}
 
-	rq_qos_throttle(q, bio);
-
 	plug = blk_mq_plug(q, bio);
 	rq = blk_get_plug_request(q, plug, bio);
-	if (!rq) {
+	if (IS_ERR(rq)) {
+		return;
+	} else if (!rq) {
 		struct blk_mq_alloc_data data = {
 			.q		= q,
 			.nr_tags	= 1,
 			.cmd_flags	= bio->bi_opf,
 		};
 
+		if (unlikely(!blk_mq_queue_enter(q, bio)))
+			return;
+		if (unlikely(!submit_bio_checks(bio)))
+			goto put_exit;
+
+		rq_qos_throttle(q, bio);
+
 		if (plug) {
 			data.nr_tags = plug->nr_ios;
 			plug->nr_ios = 1;
@@ -2553,7 +2572,9 @@ void blk_mq_submit_bio(struct bio *bio)
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)
 				bio_wouldblock_error(bio);
-			goto queue_exit;
+put_exit:
+			blk_queue_exit(q);
+			return;
 		}
 	}
 
@@ -2636,10 +2657,6 @@ void blk_mq_submit_bio(struct bio *bio)
 		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
-
-	return;
-queue_exit:
-	blk_queue_exit(q);
 }
 
 static size_t order_to_size(unsigned int order)
diff --git a/block/blk.h b/block/blk.h
index f7371d3b1522..79c98ced59c8 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -56,6 +56,7 @@ void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
 int bio_queue_enter(struct bio *bio);
+bool submit_bio_checks(struct bio *bio);
 
 static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
 {
-- 
2.33.1