diff --git a/block/blk-flush.c b/block/blk-flush.c
index aedd9320e605..f3ef6ce05c78 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,6 +212,14 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
struct blk_mq_hw_ctx *hctx;
+ if (!refcount_dec_and_test(&flush_rq->ref)) {
+ fq->rq_status = error;
+ return;
+ }
+
+ if (fq->rq_status != BLK_STS_OK)
+ error = fq->rq_status;
+
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
hctx = flush_rq->mq_hctx;
The spin_lock_irqsave(&fq->mq_flush_lock, flags) may need to be moved up
above the refcount_dec_and_test() call. Otherwise, a race between the
timeout handler and normal completion can lead to reading a wrong
'rq_status' value. I will resend a fixed version.
Thanks,
Yufen
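
For reference, a rough sketch of what the reordered flush_end_io() could
look like with the lock taken before the refcount check (untested, only
meant to illustrate the idea; the rest of the function is unchanged):

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	/*
	 * Take the flush lock first, so the refcount check and the
	 * rq_status update/read cannot race with the timeout handler.
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	/* release the tag's ownership to the req cloned from */
	hctx = flush_rq->mq_hctx;
	...
}
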
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0835f4d8d42e..eec2ec4c79bd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -905,7 +905,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
- if (refcount_dec_and_test(&rq->ref))
+
+ if (is_flush_rq(rq, hctx))
+ rq->end_io(rq, 0);
+ else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
return true;
diff --git a/block/blk.h b/block/blk.h
index de6b2e146d6e..d3ed80f144c6 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -30,6 +30,7 @@ struct blk_flush_queue {
*/
struct request *orig_rq;
spinlock_t mq_flush_lock;
+ blk_status_t rq_status;
};
extern struct kmem_cache *blk_requestq_cachep;
@@ -47,6 +48,12 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+ return hctx->fq->flush_rq == req;
+}
+
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);