Re: [PATCH V5 3/4] blk-mq: clear stale request in tags->rq[] before freeing one request pool

On Thu, May 06, 2021 at 08:12:56AM +0100, Christoph Hellwig wrote:
> On Wed, May 05, 2021 at 10:58:54PM +0800, Ming Lei wrote:
> > refcount_inc_not_zero() in bt_tags_iter() may still read a freed
> > request.
> > 
> > Fix the issue with the following approach:
> > 
> > 1) hold a per-tags spinlock when reading ->rqs[tag] and calling
> > refcount_inc_not_zero() in bt_tags_iter()
> > 
> > 2) clear any stale request referenced via ->rqs[tag] before freeing
> > the request pool, holding the same per-tags spinlock while clearing
> > ->rqs[tag]
> > 
> > After the stale requests are cleared, bt_tags_iter() can no longer
> > observe a freed request, and the clearing side also waits for any
> > pending request reference to be dropped.
> > 
> > The idea of clearing ->rqs[] is borrowed from John Garry's previous
> > patch and a recent patch from David.
> > 
> > Tested-by: John Garry <john.garry@xxxxxxxxxx>
> > Reviewed-by: David Jeffery <djeffery@xxxxxxxxxx>
> > Reviewed-by: Bart Van Assche <bvanassche@xxxxxxx>
> > Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> > ---
> >  block/blk-mq-tag.c |  8 +++++++-
> >  block/blk-mq-tag.h |  3 +++
> >  block/blk-mq.c     | 39 ++++++++++++++++++++++++++++++++++-----
> >  3 files changed, 44 insertions(+), 6 deletions(-)
> > 
> > diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> > index 4a40d409f5dd..8b239dcce85f 100644
> > --- a/block/blk-mq-tag.c
> > +++ b/block/blk-mq-tag.c
> > @@ -203,9 +203,14 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
> >  		unsigned int bitnr)
> >  {
> >  	struct request *rq = tags->rqs[bitnr];
> > +	unsigned long flags;
> >  
> > -	if (!rq || !refcount_inc_not_zero(&rq->ref))
> > +	spin_lock_irqsave(&tags->lock, flags);
> > +	if (!rq || !refcount_inc_not_zero(&rq->ref)) {
> > +		spin_unlock_irqrestore(&tags->lock, flags);
> >  		return NULL;
> > +	}
> > +	spin_unlock_irqrestore(&tags->lock, flags);
> >  	return rq;
> >  }
> >  
> > @@ -538,6 +543,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
> >  
> >  	tags->nr_tags = total_tags;
> >  	tags->nr_reserved_tags = reserved_tags;
> > +	spin_lock_init(&tags->lock);
> >  
> >  	if (blk_mq_is_sbitmap_shared(flags))
> >  		return tags;
> > diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
> > index 7d3e6b333a4a..f942a601b5ef 100644
> > --- a/block/blk-mq-tag.h
> > +++ b/block/blk-mq-tag.h
> > @@ -20,6 +20,9 @@ struct blk_mq_tags {
> >  	struct request **rqs;
> >  	struct request **static_rqs;
> >  	struct list_head page_list;
> > +
> > +	/* used to clear rqs[] before one request pool is freed */
> > +	spinlock_t lock;
> >  };
> >  
> >  extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index d053e80fa6d7..c1b28e09a27e 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -2306,6 +2306,38 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
> >  	return BLK_QC_T_NONE;
> >  }
> >  
> > +static size_t order_to_size(unsigned int order)
> > +{
> > +	return (size_t)PAGE_SIZE << order;
> > +}
> > +
> > +/* called before freeing request pool in @tags */
> > +static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
> > +		struct blk_mq_tags *tags, unsigned int hctx_idx)
> > +{
> > +	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
> > +	struct page *page;
> > +	unsigned long flags;
> > +
> > +	spin_lock_irqsave(&drv_tags->lock, flags);
> > +	list_for_each_entry(page, &tags->page_list, lru) {
> > +		unsigned long start = (unsigned long)page_address(page);
> > +		unsigned long end = start + order_to_size(page->private);
> > +		int i;
> > +
> > +		for (i = 0; i < set->queue_depth; i++) {
> > +			struct request *rq = drv_tags->rqs[i];
> > +			unsigned long rq_addr = (unsigned long)rq;
> > +
> > +			if (rq_addr >= start && rq_addr < end) {
> > +				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
> > +				cmpxchg(&drv_tags->rqs[i], rq, NULL);
> > +			}
> > +		}
> > +	}
> > +	spin_unlock_irqrestore(&drv_tags->lock, flags);
> 
> This looks really strange.  Why do we need the cmpxchg while under
> the lock anyway?  Why iterate the allocated pages and not just clear

The lock protects freeing the request pool against concurrent iteration:
it serializes blk_mq_clear_rq_mapping() against blk_mq_find_and_get_req()
in the tag-iterator path.
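
To spell that out, here is a condensed sketch of how the two sides pair up
(simplified from this patch, not new code): the reader either sees the
already-cleared (NULL) entry, or finishes refcount_inc_not_zero() before
the entry can be cleared, because both sides hold the same per-tags lock.

	/* reader side: blk_mq_find_and_get_req(), simplified */
	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (rq && !refcount_inc_not_zero(&rq->ref))
		rq = NULL;		/* request is already being freed */
	spin_unlock_irqrestore(&tags->lock, flags);

	/* freeing side: blk_mq_clear_rq_mapping(), simplified */
	spin_lock_irqsave(&drv_tags->lock, flags);
	/* NULL every ->rqs[] entry that points into the pool being freed */
	spin_unlock_irqrestore(&drv_tags->lock, flags);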

> the drv_tags values directly?
> 
> static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
> 		struct blk_mq_tags *tags, unsigned int hctx_idx)
> {
> 	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
> 	unsigned int i = 0;
> 	unsigned long flags;
> 
> 	spin_lock_irqsave(&drv_tags->lock, flags);
> 	for (i = 0; i < set->queue_depth; i++) {
> 		if (drv_tags->rqs[i])
> 			WARN_ON_ONCE(refcount_read(&drv_tags->rqs[i]->ref) != 0);
> 		drv_tags->rqs[i] = NULL;
> 	}

drv_tags->rqs[] entries may be re-assigned at the same time from another
LUN sharing the tag set, so unconditionally storing NULL can overwrite an
active assignment. We only want to clear an entry if the stored rq belongs
to the pool being freed, which is exactly what the cmpxchg() guarantees.
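
For illustration only (a hypothetical interleaving, not code from the
patch), this is the race a plain store would lose and cmpxchg() avoids:

	/*
	 * With a plain store:
	 *
	 *   CPU0 (freeing LUN A's pool)        CPU1 (another LUN, same tag set)
	 *   rq = drv_tags->rqs[i];  // stale
	 *                                       drv_tags->rqs[i] = new_rq;  // live
	 *   drv_tags->rqs[i] = NULL;  // wipes the live new_rq
	 *
	 * cmpxchg() only clears the slot if it still holds the stale pointer,
	 * so CPU1's concurrent re-assignment is left untouched:
	 */
	cmpxchg(&drv_tags->rqs[i], rq, NULL);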


Thanks,
Ming



