commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.

This change is a back-ported fix to the back-port of f5bbbbe4d6357,
a439abbd6e707232b1f399e6df1a85ace42e8f9f.

A recent commit runs tag iterator callbacks under the rcu read lock,
but existing callbacks do not satisfy the non-blocking requirement.
The commit intended to prevent an iterator from accessing a queue
that's being modified. This patch fixes the original issue by taking a
queue reference instead of reading it, which allows callbacks to make
blocking calls.

Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
Acked-by: Jianchao Wang <jianchao.w.wang@xxxxxxxxxx>
Signed-off-by: Keith Busch <keith.busch@xxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Giuliano Procida <gprocida@xxxxxxxxxx>
---
 block/blk-mq-tag.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index bf356de30134..c1c654319287 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -484,11 +484,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	/*
 	 * Avoid potential races with things like queue removal.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -505,7 +502,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
 			false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
--
2.26.0.292.g33ef6b2f38-goog
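
Note: below is a minimal, self-contained user-space sketch of the
reference-counting idiom the patch switches to. It is not kernel code;
struct queue, queue_tryget(), queue_put() and queue_busy_iter() are
hypothetical stand-ins for q_usage_counter, percpu_ref_tryget(),
blk_queue_exit() and blk_mq_queue_tag_busy_iter(). The point it
illustrates is that pinning the queue with a reference, unlike holding
an RCU read-side critical section, leaves the iteration callbacks free
to block.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct request_queue and q_usage_counter. */
struct queue {
	atomic_int usage;	/* > 0: live references; 0: queue going away */
	int nr_tags;
};

/*
 * Analogue of percpu_ref_tryget(): take a reference only if the queue
 * has not already started dying.  Never blocks.
 */
static bool queue_tryget(struct queue *q)
{
	int old = atomic_load(&q->usage);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&q->usage, &old, old + 1))
			return true;
	}
	return false;
}

/* Analogue of blk_queue_exit(): drop the reference taken above. */
static void queue_put(struct queue *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/*
 * Analogue of blk_mq_queue_tag_busy_iter(): because the queue is pinned
 * by a reference rather than by rcu_read_lock(), fn() is allowed to sleep.
 */
static void queue_busy_iter(struct queue *q, void (*fn)(int tag))
{
	if (!queue_tryget(q))
		return;		/* queue is being removed, nothing to iterate */

	for (int tag = 0; tag < q->nr_tags; tag++)
		fn(tag);	/* may block: no RCU critical section held */

	queue_put(q);
}

static void show_tag(int tag)
{
	printf("busy tag %d\n", tag);
}

int main(void)
{
	struct queue q = { .usage = 1, .nr_tags = 4 };

	queue_busy_iter(&q, show_tag);
	return 0;
}

The kernel achieves the same effect with a per-cpu reference counter so
the hot path stays cheap; the sketch uses a plain atomic only to keep
the example short.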