Re: [PATCH V8 07/11] blk-mq: stop to handle IO and drain IO before hctx becomes inactive

On Fri, Apr 24, 2020 at 03:27:45PM +0200, Hannes Reinecke wrote:
> On 4/24/20 12:23 PM, Ming Lei wrote:
> > Before a CPU goes offline, check whether it is the last online CPU of
> > the hctx. If so, mark this hctx as inactive and wait for all in-flight
> > IOs originating from it to complete. Also check in
> > blk_mq_get_driver_tag() whether this hctx has become inactive, and if
> > so, release the allocated tag.
> > 
> > This guarantees that there is no in-flight IO before the managed IRQ
> > line is shut down once all CPUs of that IRQ line are offline.
> > 
> > Cc: John Garry <john.garry@xxxxxxxxxx>
> > Cc: Bart Van Assche <bvanassche@xxxxxxx>
> > Cc: Hannes Reinecke <hare@xxxxxxxx>
> > Cc: Christoph Hellwig <hch@xxxxxx>
> > Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> > Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> > ---
> >   block/blk-mq-debugfs.c |   1 +
> >   block/blk-mq.c         | 124 +++++++++++++++++++++++++++++++++++++----
> >   include/linux/blk-mq.h |   3 +
> >   3 files changed, 117 insertions(+), 11 deletions(-)
> > 
> > diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
> > index 8e745826eb86..b62390918ca5 100644
> > --- a/block/blk-mq-debugfs.c
> > +++ b/block/blk-mq-debugfs.c
> > @@ -213,6 +213,7 @@ static const char *const hctx_state_name[] = {
> >   	HCTX_STATE_NAME(STOPPED),
> >   	HCTX_STATE_NAME(TAG_ACTIVE),
> >   	HCTX_STATE_NAME(SCHED_RESTART),
> > +	HCTX_STATE_NAME(INACTIVE),
> >   };
> >   #undef HCTX_STATE_NAME
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index d432cc74ef78..4d0c271d9f6f 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -1050,11 +1050,31 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
> >   	return true;
> >   }
> > -static bool blk_mq_get_driver_tag(struct request *rq)
> > +static bool blk_mq_get_driver_tag(struct request *rq, bool direct_issue)
> >   {
> >   	if (rq->tag != -1)
> >   		return true;
> > -	return __blk_mq_get_driver_tag(rq);
> > +
> > +	if (!__blk_mq_get_driver_tag(rq))
> > +		return false;
> > +	/*
> > +	 * Add a memory barrier in case the direct-issue process has been
> > +	 * migrated to a CPU that may not belong to this hctx, so that
> > +	 * driver tag assignment is ordered against checking
> > +	 * BLK_MQ_S_INACTIVE. Otherwise barrier() is enough, since setting
> > +	 * BLK_MQ_S_INACTIVE and assigning the driver tag run on the same
> > +	 * CPU when BLK_MQ_S_INACTIVE gets set.
> > +	 */
> > +	if (unlikely(direct_issue && rq->mq_ctx->cpu != raw_smp_processor_id()))
> > +		smp_mb();
> > +	else
> > +		barrier();
> > +
> > +	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &rq->mq_hctx->state))) {
> > +		blk_mq_put_driver_tag(rq);
> > +		return false;
> > +	}
> > +	return true;
> >   }
> >   static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
> > @@ -1103,7 +1123,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
> >   		 * Don't clear RESTART here, someone else could have set it.
> >   		 * At most this will cost an extra queue run.
> >   		 */
> > -		return blk_mq_get_driver_tag(rq);
> > +		return blk_mq_get_driver_tag(rq, false);
> >   	}
> >   	wait = &hctx->dispatch_wait;
> > @@ -1129,7 +1149,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
> >   	 * allocation failure and adding the hardware queue to the wait
> >   	 * queue.
> >   	 */
> > -	ret = blk_mq_get_driver_tag(rq);
> > +	ret = blk_mq_get_driver_tag(rq, false);
> >   	if (!ret) {
> >   		spin_unlock(&hctx->dispatch_wait_lock);
> >   		spin_unlock_irq(&wq->lock);
> > @@ -1228,7 +1248,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
> >   			break;
> >   		}
> > -		if (!blk_mq_get_driver_tag(rq)) {
> > +		if (!blk_mq_get_driver_tag(rq, false)) {
> >   			/*
> >   			 * The initial allocation attempt failed, so we need to
> >   			 * rerun the hardware queue when a tag is freed. The
> > @@ -1260,7 +1280,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
> >   			bd.last = true;
> >   		else {
> >   			nxt = list_first_entry(list, struct request, queuelist);
> > -			bd.last = !blk_mq_get_driver_tag(nxt);
> > +			bd.last = !blk_mq_get_driver_tag(nxt, false);
> >   		}
> >   		ret = q->mq_ops->queue_rq(hctx, &bd);
> > @@ -1853,7 +1873,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
> >   	if (!blk_mq_get_dispatch_budget(hctx))
> >   		goto insert;
> > -	if (!blk_mq_get_driver_tag(rq)) {
> > +	if (!blk_mq_get_driver_tag(rq, true)) {
> >   		blk_mq_put_dispatch_budget(hctx);
> >   		goto insert;
> >   	}
> > @@ -2261,13 +2281,92 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
> >   	return -ENOMEM;
> >   }
> > -static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
> > +struct count_inflight_data {
> > +	unsigned count;
> > +	struct blk_mq_hw_ctx *hctx;
> > +};
> > +
> > +static bool blk_mq_count_inflight_rq(struct request *rq, void *data,
> > +				     bool reserved)
> >   {
> > -	return 0;
> > +	struct count_inflight_data *count_data = data;
> > +
> > +	/*
> > +	 * We can't check rq's state here, because it only becomes
> > +	 * MQ_RQ_IN_FLIGHT in blk_mq_start_request(), and by then the rq
> > +	 * can no longer be prevented from being issued.
> > +	 *
> > +	 * So check whether a driver tag is assigned; if yes, count this
> > +	 * rq as in-flight.
> > +	 */
> > +	if (rq->tag >= 0 && rq->mq_hctx == count_data->hctx)
> > +		count_data->count++;
> > +
> > +	return true;
> > +}
> > +
> > +static bool blk_mq_inflight_rq(struct request *rq, void *data,
> > +			       bool reserved)
> > +{
> > +	return rq->tag >= 0;
> > +}
> > +
> > +static unsigned blk_mq_tags_inflight_rqs(struct blk_mq_hw_ctx *hctx)
> > +{
> > +	struct count_inflight_data count_data = {
> > +		.count	= 0,
> > +		.hctx	= hctx,
> > +	};
> > +
> > +	blk_mq_all_tag_busy_iter(hctx->tags, blk_mq_count_inflight_rq,
> > +			blk_mq_inflight_rq, &count_data);
> > +
> > +	return count_data.count;
> > +}
> > +
> 
> Remind me again: Why do we need the 'filter' function here?
> Can't we just move the filter function into the main iterator and
> stay with the original implementation?

This is a good question; it took John and me days to figure out the
cause of an IO hang here.

The main iterator only visits requests for which blk_mq_request_started()
returns true, see bt_tags_iter().
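
For reference, the relevant check looks roughly like this (a simplified
sketch of bt_tags_iter() in block/blk-mq-tag.c; exact details vary by
tree):

	static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr,
				 void *data)
	{
		struct bt_tags_iter_data *iter_data = data;
		struct blk_mq_tags *tags = iter_data->tags;
		struct request *rq = tags->rqs[bitnr];

		/*
		 * A request that already holds a driver tag but has not
		 * reached blk_mq_start_request() yet is still MQ_RQ_IDLE,
		 * so the started check below skips it. That is why the
		 * drain path needs its own filter on rq->tag instead.
		 */
		if (rq && blk_mq_request_started(rq))
			return iter_data->fn(rq, iter_data->data,
					     iter_data->reserved);

		return true;
	}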

However, we prevent the rq from being allocated a driver tag in
blk_mq_get_driver_tag(), and there is still a window between driver tag
assignment and the call to blk_mq_start_request(), which happens from
.queue_rq().

More importantly, there is nothing we can check in blk_mq_start_request().
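
To make the pairing explicit, here is a rough sketch of how the two
sides are intended to order against each other (paraphrased, not the
literal patch code; the offline-side barrier and wait loop are assumed
from the rest of the series):

	/* Tag side (blk_mq_get_driver_tag()):
	 *	assign driver tag
	 *	smp_mb();	// or barrier() when on a CPU of this hctx
	 *	if (test_bit(BLK_MQ_S_INACTIVE, &hctx->state))
	 *		blk_mq_put_driver_tag(rq);
	 *
	 * Offline side (cpuhp handler):
	 *	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	 *	smp_mb__after_atomic();
	 *	while (blk_mq_tags_inflight_rqs(hctx))
	 *		wait/retry;
	 *
	 * Either the tag side observes BLK_MQ_S_INACTIVE and drops its
	 * tag, or the offline side observes the assigned tag and waits
	 * for the request to complete, so no request can be issued on
	 * an inactive hctx.
	 */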


Thanks, 
Ming



