[PATCH V3] blk-mq: fix race between complete and BLK_EH_RESET_TIMER

The normal request completion can happen before or while
BLK_EH_RESET_TIMER is being handled, and this race may cause the
request to never be completed, since the driver's .timeout() may keep
returning BLK_EH_RESET_TIMER.
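
For illustration, the lost-completion sequence looks roughly like this
(an illustrative timeline against the current gstate-based timeout
code, not an exact trace):

    timeout path                        normal completion path
    blk_mq_check_expired()
      set rq->aborted_gstate
                                        blk_mq_complete_request()
                                          aborted_gstate == gstate,
                                          completion is ignored
    .timeout() returns BLK_EH_RESET_TIMER
    blk_mq_rq_update_aborted_gstate(rq, 0)
    blk_add_timer(rq)

The completion is lost, and if every subsequent .timeout() again
returns BLK_EH_RESET_TIMER, the request is never completed.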

This issue can't be fixed completely in the driver, since the normal
completion can still arrive between .timeout() returning and the
handling of BLK_EH_RESET_TIMER.
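
For illustration only, even a hypothetical driver that re-checks for
completion inside its .timeout() handler cannot close the window; the
my_drv_* names below are made up:

	static enum blk_eh_timer_return my_drv_timeout(struct request *rq,
						       bool reserved)
	{
		struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(rq);

		/*
		 * Racy: the device may complete @rq at any point after
		 * this check, including after this handler has returned
		 * and before the core acts on BLK_EH_RESET_TIMER.
		 */
		if (my_drv_cmd_seen_completion(cmd))
			return BLK_EH_HANDLED;

		my_drv_start_recovery(cmd);
		return BLK_EH_RESET_TIMER;
	}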

Fix the race by introducing the request state MQ_RQ_COMPLETE_IN_TIMEOUT
and by reading/writing the request's state under the queue lock. The
lock could actually be per-request, but it isn't worth introducing a
dedicated lock for such an unusual event.

Also, when .timeout() returns BLK_EH_HANDLED, sync with the normal
completion path before finally completing the timed-out request, so
that its state can no longer be touched by a normal completion.
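
Condensed from the diff below, the interaction then becomes (a sketch,
not the exact code):

    normal completion, slow path (queue lock held):
	aborted_gstate clear -> complete the request normally
	aborted_gstate set   -> mark the request MQ_RQ_COMPLETE_IN_TIMEOUT

    timeout returning BLK_EH_RESET_TIMER (queue lock held):
	state == MQ_RQ_COMPLETE_IN_TIMEOUT -> restore MQ_RQ_IN_FLIGHT and
					      complete the request
	otherwise                          -> blk_add_timer(), then clear
					      aborted_gstate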

Cc: "jianchao.wang" <jianchao.w.wang@xxxxxxxxxx>
Cc: Bart Van Assche <bart.vanassche@xxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Cc: Israel Rukshin <israelr@xxxxxxxxxxxx>
Cc: Max Gurtovoy <maxg@xxxxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
V3:
	- before completing the request for BLK_EH_HANDLED, sync with the
	  normal completion path
	- make sure the request's state is updated to MQ_RQ_IN_FLIGHT
	  before completing it
V2:
	- rename the new state to MQ_RQ_COMPLETE_IN_TIMEOUT
	- fix lock uses in blk_mq_rq_timed_out
	- document the reordering between blk_add_timer() and
	  blk_mq_rq_update_aborted_gstate(req, 0)

 block/blk-mq.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++-----------
 block/blk-mq.h |  1 +
 2 files changed, 70 insertions(+), 16 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0dc9e341c2a7..d70f69a32226 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -198,23 +198,12 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
-/**
- * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
- * @q: request queue.
- *
- * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Once this function is returned, we make
- * sure no dispatch can happen until the queue is unquiesced via
- * blk_mq_unquiesce_queue().
- */
-void blk_mq_quiesce_queue(struct request_queue *q)
+static void blk_mq_queue_synchronize_rcu(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_quiesce_queue_nowait(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
 			synchronize_srcu(hctx->srcu);
@@ -224,6 +213,21 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	if (rcu)
 		synchronize_rcu();
 }
+
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent that the struct request end_io()
+ * callback function is invoked. Once this function is returned, we make
+ * sure no dispatch can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+	blk_mq_quiesce_queue_nowait(q);
+	blk_mq_queue_synchronize_rcu(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
 /*
@@ -630,10 +634,27 @@ void blk_mq_complete_request(struct request *rq)
 	 * However, that would complicate paths which want to synchronize
 	 * against us.  Let stay in sync with the issue path so that
 	 * hctx_lock() covers both issue and completion paths.
+	 *
+	 * Cover the complete vs BLK_EH_RESET_TIMER race in the slow
+	 * path by holding the queue lock.
 	 */
 	hctx_lock(hctx, &srcu_idx);
 	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
 		__blk_mq_complete_request(rq);
+	else {
+		unsigned long flags;
+		bool need_complete = false;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		if (!blk_mq_rq_aborted_gstate(rq))
+			need_complete = true;
+		else
+			blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE_IN_TIMEOUT);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		if (need_complete)
+			__blk_mq_complete_request(rq);
+	}
 	hctx_unlock(hctx, srcu_idx);
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
@@ -814,6 +835,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
 	const struct blk_mq_ops *ops = req->q->mq_ops;
 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
+	unsigned long flags;
 
 	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
 
@@ -822,16 +844,47 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 
 	switch (ret) {
 	case BLK_EH_HANDLED:
+		/*
+		 * If .timeout() returns BLK_EH_HANDLED, this rq shouldn't
+		 * be completed from normal irq context any more, but to
+		 * be safe, sync with the normal completion path before
+		 * finally completing this request, since the normal
+		 * completion path may still touch this rq's state.
+		 */
+		blk_mq_queue_synchronize_rcu(req->q);
+
+		spin_lock_irqsave(req->q->queue_lock, flags);
+ complete_rq:
+		if (blk_mq_rq_state(req) == MQ_RQ_COMPLETE_IN_TIMEOUT)
+			blk_mq_rq_update_state(req, MQ_RQ_IN_FLIGHT);
+		spin_unlock_irqrestore(req->q->queue_lock, flags);
 		__blk_mq_complete_request(req);
 		break;
 	case BLK_EH_RESET_TIMER:
 		/*
-		 * As nothing prevents from completion happening while
-		 * ->aborted_gstate is set, this may lead to ignored
-		 * completions and further spurious timeouts.
+		 * The normal completion may happen while the timeout is
+		 * being handled, or even after .timeout() has returned,
+		 * so once the request has been completed we must not
+		 * reset the timer: the next timeout handling may again
+		 * return BLK_EH_RESET_TIMER, and the request then has to
+		 * be completed here instead.
+		 *
+		 * Hold the queue lock while reading/writing the rq's
+		 * aborted_gstate and normal state, so the race is
+		 * avoided completely.
+		 *
+		 * blk_add_timer() may be reordered with the resetting of
+		 * aborted_gstate; the only side-effect is that if this
+		 * request is recycled after aborted_gstate is cleared, it
+		 * may time out a bit late, which we can live with since
+		 * timeout events are so unusual.
 		 */
-		blk_mq_rq_update_aborted_gstate(req, 0);
+		spin_lock_irqsave(req->q->queue_lock, flags);
+		if (blk_mq_rq_state(req) == MQ_RQ_COMPLETE_IN_TIMEOUT)
+			goto complete_rq;
 		blk_add_timer(req);
+		blk_mq_rq_update_aborted_gstate(req, 0);
+		spin_unlock_irqrestore(req->q->queue_lock, flags);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		break;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 88c558f71819..0426d048743d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -35,6 +35,7 @@ enum mq_rq_state {
 	MQ_RQ_IDLE		= 0,
 	MQ_RQ_IN_FLIGHT		= 1,
 	MQ_RQ_COMPLETE		= 2,
+	MQ_RQ_COMPLETE_IN_TIMEOUT	= 3,
 
 	MQ_RQ_STATE_BITS	= 2,
 	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
-- 
2.9.5



