On Sat 10-06-23 10:30:43, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@xxxxxxxxxx>
> 
> In __blk_mq_tag_busy/idle(), updating 'active_queues' and calculating
> 'wake_batch' is not atomic:
> 
> t1:                              t2:
> __blk_mq_tag_busy                __blk_mq_tag_busy
>   inc active_queues
>   // assume 1 -> 2
>                                    inc active_queues
>                                    // 2 -> 3
>                                    blk_mq_update_wake_batch
>                                    // calculate based on 3
>   blk_mq_update_wake_batch
>   /* calculate based on 2, while active_queues is actually 3. */
> 
> Fix this problem by protecting them with 'tags->lock'. This is not a
> hot path, so performance should not be a concern. And now that all
> writers are inside the lock, switch 'active_queues' from atomic to
> unsigned int.
> 
> Fixes: 180dccb0dba4 ("blk-mq: fix tag_get wait task can't be awakened")
> Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@xxxxxxx>

								Honza

> ---
> Changes in v2:
>  - switch 'active_queues' from atomic to unsigned int.
> 
>  block/blk-mq-debugfs.c |  2 +-
>  block/blk-mq-tag.c     | 15 ++++++++++-----
>  block/blk-mq.h         |  3 +--
>  include/linux/blk-mq.h |  3 +--
>  4 files changed, 13 insertions(+), 10 deletions(-)
> 
> diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
> index 68165a50951b..c3b5930106b2 100644
> --- a/block/blk-mq-debugfs.c
> +++ b/block/blk-mq-debugfs.c
> @@ -401,7 +401,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
>  	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
>  	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
>  	seq_printf(m, "active_queues=%d\n",
> -		   atomic_read(&tags->active_queues));
> +		   READ_ONCE(tags->active_queues));
>  
>  	seq_puts(m, "\nbitmap_tags:\n");
>  	sbitmap_queue_show(&tags->bitmap_tags, m);
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index dfd81cab5788..cc57e2dd9a0b 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
>  void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>  {
>  	unsigned int users;
> +	struct blk_mq_tags *tags = hctx->tags;
>  
>  	/*
>  	 * calling test_bit() prior to test_and_set_bit() is intentional,
> @@ -55,9 +56,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>  		return;
>  	}
>  
> -	users = atomic_inc_return(&hctx->tags->active_queues);
> -
> -	blk_mq_update_wake_batch(hctx->tags, users);
> +	spin_lock_irq(&tags->lock);
> +	users = tags->active_queues + 1;
> +	WRITE_ONCE(tags->active_queues, users);
> +	blk_mq_update_wake_batch(tags, users);
> +	spin_unlock_irq(&tags->lock);
>  }
>  
>  /*
> @@ -90,9 +93,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
>  		return;
>  	}
>  
> -	users = atomic_dec_return(&tags->active_queues);
> -
> +	spin_lock_irq(&tags->lock);
> +	users = tags->active_queues - 1;
> +	WRITE_ONCE(tags->active_queues, users);
>  	blk_mq_update_wake_batch(tags, users);
> +	spin_unlock_irq(&tags->lock);
>  
>  	blk_mq_tag_wakeup_all(tags, false);
>  }
> diff --git a/block/blk-mq.h b/block/blk-mq.h
> index 8c642e9f32f1..1743857e0b01 100644
> --- a/block/blk-mq.h
> +++ b/block/blk-mq.h
> @@ -412,8 +412,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
>  		return true;
>  	}
>  
> -	users = atomic_read(&hctx->tags->active_queues);
> -
> +	users = READ_ONCE(hctx->tags->active_queues);
>  	if (!users)
>  		return true;
>  
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index 59b52ec155b1..f401067ac03a 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -739,8 +739,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>  struct blk_mq_tags {
>  	unsigned int nr_tags;
>  	unsigned int nr_reserved_tags;
> -
> -	atomic_t active_queues;
> +	unsigned int active_queues;
>  
>  	struct sbitmap_queue bitmap_tags;
>  	struct sbitmap_queue breserved_tags;
> -- 
> 2.39.2
> 

-- 
Jan Kara <jack@xxxxxxxx>
SUSE Labs, CR
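[Editor's illustration, not part of the original thread.] For readers
following the change outside the kernel tree, here is a minimal
userspace sketch of the locking pattern the patch adopts: the counter
update and the derived calculation are serialized under one lock, so
the last writer's batch always matches the final count. All names are
hypothetical stand-ins; a pthread mutex takes the place of tags->lock
and a toy calculation takes the place of blk_mq_update_wake_batch().

#include <pthread.h>
#include <stdio.h>

struct tags {
	pthread_mutex_t lock;
	unsigned int active_queues;
	unsigned int wake_batch;
};

/*
 * Stand-in for blk_mq_update_wake_batch(); the real kernel code
 * derives the batch from the sbitmap depth and the number of users.
 */
static void update_wake_batch(struct tags *tags, unsigned int users)
{
	tags->wake_batch = users ? 8 / users : 8;
}

static void tag_busy(struct tags *tags)
{
	unsigned int users;

	/*
	 * The increment and the recalculation happen atomically with
	 * respect to other writers, closing the race the changelog
	 * describes.
	 */
	pthread_mutex_lock(&tags->lock);
	users = tags->active_queues + 1;
	tags->active_queues = users;
	update_wake_batch(tags, users);
	pthread_mutex_unlock(&tags->lock);
}

static void tag_idle(struct tags *tags)
{
	unsigned int users;

	pthread_mutex_lock(&tags->lock);
	users = tags->active_queues - 1;
	tags->active_queues = users;
	update_wake_batch(tags, users);
	pthread_mutex_unlock(&tags->lock);
}

int main(void)
{
	struct tags tags = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tag_busy(&tags);
	tag_busy(&tags);
	printf("users=%u batch=%u\n", tags.active_queues, tags.wake_batch);
	tag_idle(&tags);
	printf("users=%u batch=%u\n", tags.active_queues, tags.wake_batch);
	return 0;
}

Build with "gcc -pthread". With the pre-patch shape (an atomic
increment followed by an unlocked recalculation), two concurrent
tag_busy() calls could leave wake_batch computed from a stale count,
which is exactly the interleaving shown in the changelog above.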