Re: [PATCH] blk-mq: setup blk_mq_alloc_data.cmd_flags after submit_bio_checks() is done

On Fri, Nov 12, 2021 at 09:44:41AM +0100, Christoph Hellwig wrote:
> On Fri, Nov 12, 2021 at 04:37:19PM +0800, Ming Lei wrote:
> > > can only be used for reads, and no fua can be set if the preallocating
> > > I/O didn't use fua, etc.
> > > 
> > > What are the pitfalls of just changing cmd_flags?
> > 
> > Then we need to check cmd_flags carefully: hctx->type has to be the
> > same, and the flush & passthrough flags have to be the same. That is,
> > all ->cmd_flags used for allocating rqs have to match the subsequent
> > bio->bi_opf.
> > 
> > In the usual case, I guess all I/Os submitted from the same plug batch
> > should be of the same type. If not, we can switch to changing cmd_flags.
> 
> Jens: does this limit fit your use cases?
> 
> I guess rejecting different flags is probably the best quick fix we can
> do for now, but I suspect we'll eventually want to relax it.

A mixed read/write workload would be affected, so I think we need to
switch to changing cmd_flags instead. How about the following patch?
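
To spell out the rule the patch implements: a cached rq is reused only if
the bio maps to the same hctx type and agrees with the rq on flush
handling; any remaining difference is absorbed by overwriting
rq->cmd_flags. Below is a stand-alone user-space model of that check (the
flag bit values are invented for the demo, and the real
blk_mq_can_use_cached_rq() compares against rq->mq_hctx->type rather than
recomputing the type from rq->cmd_flags):

#include <stdbool.h>
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

/* made-up bit layout, for the demo only */
#define REQ_OP_READ	0u
#define REQ_OP_WRITE	1u
#define REQ_OP_MASK	0xffu
#define REQ_FUA		(1u << 8)
#define REQ_PREFLUSH	(1u << 9)
#define REQ_POLLED	(1u << 10)

static enum hctx_type get_hctx_type(unsigned int flags)
{
	if (flags & REQ_POLLED)
		return HCTX_TYPE_POLL;
	if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		return HCTX_TYPE_READ;
	return HCTX_TYPE_DEFAULT;
}

static bool op_is_flush(unsigned int flags)
{
	return flags & (REQ_FUA | REQ_PREFLUSH);
}

static bool can_use_cached_rq(unsigned int rq_flags, unsigned int bio_flags)
{
	return get_hctx_type(rq_flags) == get_hctx_type(bio_flags) &&
	       op_is_flush(rq_flags) == op_is_flush(bio_flags);
}

int main(void)
{
	/* cached write rq, read bio: hctx type differs -> allocate fresh */
	printf("%d\n", can_use_cached_rq(REQ_OP_WRITE, REQ_OP_READ));
	/* cached write rq, FUA write bio: flush rule differs -> allocate fresh */
	printf("%d\n", can_use_cached_rq(REQ_OP_WRITE, REQ_OP_WRITE | REQ_FUA));
	/* two plain writes: reuse the rq and just overwrite cmd_flags */
	printf("%d\n", can_use_cached_rq(REQ_OP_WRITE, REQ_OP_WRITE));
	return 0;
}

So with separate read or poll queues configured, a read or polled bio
can't reuse a cached write rq and falls back to a fresh allocation;
everything else just takes over the cached rq.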

From 9ab77b7adee768272944c20b7cffc8abdb85a35b Mon Sep 17 00:00:00 2001
From: Ming Lei <ming.lei@xxxxxxxxxx>
Date: Fri, 12 Nov 2021 08:14:38 +0800
Subject: [PATCH] blk-mq: fix filesystem I/O request allocation

submit_bio_checks() may update bio->bi_opf, so blk_mq_alloc_data.cmd_flags
has to be initialized from bio->bi_opf only after submit_bio_checks()
returns, when allocating a new request.

When using a cached request, fall back to allocating a new request if
the cached rq isn't compatible with the incoming bio; otherwise update
rq->cmd_flags from the incoming bio->bi_opf. Also make sure the queue
usage counter grabbed by bio_queue_enter() is released on all failure
paths.

Fixes: 900e080752025f00 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c | 41 ++++++++++++++++++++++++++++++++---------
 block/blk-mq.h | 26 +++++++++++++++-----------
 2 files changed, 47 insertions(+), 20 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f511db395c7f..3ab34c4f20da 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2521,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-	if (unlikely(!submit_bio_checks(bio)))
-		goto put_exit;
 	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
-		goto put_exit;
+		return NULL;
 
 	rq_qos_throttle(q, bio);
 
@@ -2543,19 +2539,32 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-put_exit:
-	blk_queue_exit(q);
+
 	return NULL;
 }
 
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+		struct bio *bio)
+{
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return false;
+
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;
+
+	return true;
+}
+
 static inline struct request *blk_mq_get_request(struct request_queue *q,
 						 struct blk_plug *plug,
 						 struct bio *bio,
 						 unsigned int nsegs,
 						 bool *same_queue_rq)
 {
+	struct request *rq;
+	bool checked = false;
+
 	if (plug) {
-		struct request *rq;
 
 		rq = rq_list_peek(&plug->cached_rq);
 		if (rq && rq->q == q) {
@@ -2564,6 +2573,10 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 			if (blk_mq_attempt_bio_merge(q, bio, nsegs,
 						same_queue_rq))
 				return NULL;
+			checked = true;
+			if (!blk_mq_can_use_cached_rq(rq, bio))
+				goto fallback;
+			rq->cmd_flags = bio->bi_opf;
 			plug->cached_rq = rq_list_next(rq);
 			INIT_LIST_HEAD(&rq->queuelist);
 			rq_qos_throttle(q, bio);
@@ -2571,7 +2584,17 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 		}
 	}
 
-	return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
+	if (unlikely(bio_queue_enter(bio)))
+		return NULL;
+	if (unlikely(!checked && !submit_bio_checks(bio)))
+		goto out_put;
+	rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+	if (rq)
+		return rq;
+out_put:
+	blk_queue_exit(q);
+	return NULL;
 }
 
 /**
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cb0b5482ca5e..4d92fc1124ae 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-						     unsigned int flags,
-						     struct blk_mq_ctx *ctx)
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
 {
 	enum hctx_type type = HCTX_TYPE_DEFAULT;
 
@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		type = HCTX_TYPE_POLL;
 	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
 		type = HCTX_TYPE_READ;
-	
-	return ctx->hctxs[type];
+	return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
+						     struct blk_mq_ctx *ctx)
+{
+	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
 }
 
 /*
-- 
2.31.1


-- 
Ming



