From: Keith Busch <keith.busch@xxxxxxxxx>

commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.

A recent commit runs tag iterator callbacks under the rcu read lock,
but existing callbacks do not satisfy the non-blocking requirement.
The commit intended to prevent an iterator from accessing a queue
that's being modified. This patch fixes the original issue by taking a
queue reference instead of reading it, which allows callbacks to make
blocking calls.

Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
Acked-by: Jianchao Wang <jianchao.w.wang@xxxxxxxxxx>
Signed-off-by: Keith Busch <keith.busch@xxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Giuliano Procida <gprocida@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 block/blk-mq-tag.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -336,16 +336,11 @@ void blk_mq_queue_tag_busy_iter(struct r

	/*
	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;
-	}

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;
@@ -361,7 +356,7 @@ void blk_mq_queue_tag_busy_iter(struct r
		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }

 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
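
For readers outside the kernel tree: the hunks above replace an RCU
read-side critical section (inside which the iterator callbacks must
not sleep) with a "get unless zero" queue reference that fails once
teardown has begun, so callbacks may block while the walk holds the
reference. Below is a minimal user-space sketch of that pattern using
C11 atomics; every name in it (queue_ref, tag_busy_iter, and so on) is
hypothetical, and the real percpu_ref has a per-CPU fast path this
sketch omits.

/*
 * Illustrative user-space sketch (not kernel code) of the guard this
 * patch switches to: a "get unless zero" reference that lets the tag
 * walk run callbacks which may block, something forbidden under
 * rcu_read_lock(). All names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_ref {
	/* 1 at init (the owner's reference); 0 once teardown completes. */
	atomic_long count;
};

/* Analogue of percpu_ref_tryget(): take a reference unless it is dead. */
static bool queue_ref_tryget(struct queue_ref *ref)
{
	long old = atomic_load(&ref->count);

	while (old != 0) {
		/* Never resurrect a reference that already hit zero. */
		if (atomic_compare_exchange_weak(&ref->count, &old, old + 1))
			return true;
	}
	return false;
}

/* Analogue of blk_queue_exit(): drop the reference taken above. */
static void queue_ref_put(struct queue_ref *ref)
{
	atomic_fetch_sub(&ref->count, 1);
}

/*
 * The shape blk_mq_queue_tag_busy_iter() now has: bail out if the
 * queue is going away, otherwise hold a reference for the whole walk
 * so the callback is free to sleep.
 */
static void tag_busy_iter(struct queue_ref *ref,
			  void (*fn)(unsigned int tag), unsigned int ntags)
{
	if (!queue_ref_tryget(ref))
		return;
	for (unsigned int tag = 0; tag < ntags; tag++)
		fn(tag);		/* may block: no RCU lock is held */
	queue_ref_put(ref);
}

static void show_tag(unsigned int tag)
{
	printf("busy tag %u\n", tag);
}

int main(void)
{
	struct queue_ref ref = { .count = 1 };

	tag_busy_iter(&ref, show_tag, 3);	/* walks tags 0..2 */
	queue_ref_put(&ref);			/* owner drops init ref */
	tag_busy_iter(&ref, show_tag, 3);	/* no-op: ref is dead */
	return 0;
}

The design point the sketch mirrors: percpu_ref_tryget() both detects
a dying queue (replacing the percpu_ref_is_zero() check) and pins the
queue for the duration of the walk, which is why the matching
blk_queue_exit() at the end of the function is all the cleanup needed.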