The nr_hw_queues update could potentially race with disk addition/removal
while hctx sysfs files are being registered/unregistered.
__blk_mq_update_nr_hw_queues() runs with q->tag_set->tag_list_lock held,
so to avoid racing with it we should acquire the same lock while
registering/unregistering hctx sysfs files.

With this patch, blk_mq_sysfs_register() (called during disk addition)
and blk_mq_sysfs_unregister() (called during disk removal) now run with
q->tag_set->tag_list_lock held, so they cannot race with
__blk_mq_update_nr_hw_queues().

Signed-off-by: Nilay Shroff <nilay@xxxxxxxxxxxxx>
---
 block/blk-mq-sysfs.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6113328abd70..2b9df2b73bf6 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -229,22 +229,31 @@ int blk_mq_sysfs_register(struct gendisk *disk)
 	kobject_uevent(q->mq_kobj, KOBJ_ADD);
 
+	mutex_lock(&q->tag_set->tag_list_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&q->tag_set->tag_list_lock);
 			goto unreg;
+		}
 	}
 
+	mutex_unlock(&q->tag_set->tag_list_lock);
+
 out:
 	return ret;
 
 unreg:
+	mutex_lock(&q->tag_set->tag_list_lock);
+
 	queue_for_each_hw_ctx(q, hctx, j) {
 		if (j < i)
 			blk_mq_unregister_hctx(hctx);
 	}
 
+	mutex_unlock(&q->tag_set->tag_list_lock);
+
 	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(q->mq_kobj);
 	return ret;
@@ -256,10 +265,13 @@ void blk_mq_sysfs_unregister(struct gendisk *disk)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
 
+	mutex_lock(&q->tag_set->tag_list_lock);
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
+	mutex_unlock(&q->tag_set->tag_list_lock);
+
 	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(q->mq_kobj);
 }
-- 
2.47.1
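
For reference, the serialization pattern applied above can be sketched as a
stand-alone userspace program (pthreads, not kernel code; all names such as
fake_queue, fake_update_nr_hw_queues and fake_sysfs_register are invented
for this sketch and are not kernel APIs). One thread resizes the per-hctx
state the way __blk_mq_update_nr_hw_queues() would, another walks it the
way blk_mq_sysfs_register() does, and because both take the same mutex the
registrar never iterates an array that is being reallocated under it:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_queue {
	pthread_mutex_t tag_list_lock;	/* plays the role of q->tag_set->tag_list_lock */
	int nr_hw_queues;		/* plays the role of q->nr_hw_queues */
	int *hctx_registered;		/* per-hctx "sysfs registered" flag */
};

/* Resize path: a toy analogue of __blk_mq_update_nr_hw_queues(). */
static void fake_update_nr_hw_queues(struct fake_queue *q, int new_nr)
{
	pthread_mutex_lock(&q->tag_list_lock);
	int *tmp = realloc(q->hctx_registered, new_nr * sizeof(*tmp));
	if (tmp) {
		for (int i = q->nr_hw_queues; i < new_nr; i++)
			tmp[i] = 0;	/* new hctxs start unregistered */
		q->hctx_registered = tmp;
		q->nr_hw_queues = new_nr;
	}
	pthread_mutex_unlock(&q->tag_list_lock);
}

/*
 * Registration path: a toy analogue of blk_mq_sysfs_register(). Holding
 * the lock across the whole walk guarantees that nr_hw_queues and the
 * array cannot change mid-iteration.
 */
static void fake_sysfs_register(struct fake_queue *q)
{
	pthread_mutex_lock(&q->tag_list_lock);
	for (int i = 0; i < q->nr_hw_queues; i++)
		q->hctx_registered[i] = 1;
	pthread_mutex_unlock(&q->tag_list_lock);
}

static void *updater(void *arg)
{
	for (int n = 2; n <= 64; n++)
		fake_update_nr_hw_queues(arg, n);
	return NULL;
}

static void *registrar(void *arg)
{
	for (int n = 0; n < 1000; n++)
		fake_sysfs_register(arg);
	return NULL;
}

int main(void)
{
	struct fake_queue q = {
		.tag_list_lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_hw_queues = 1,
		.hctx_registered = calloc(1, sizeof(int)),
	};
	pthread_t t1, t2;

	pthread_create(&t1, NULL, updater, &q);
	pthread_create(&t2, NULL, registrar, &q);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("final nr_hw_queues = %d\n", q.nr_hw_queues);
	free(q.hctx_registered);
	return 0;
}

The same reasoning explains the error path in the patch: the lock is
dropped before jumping to the unreg label, which then retakes it before
walking back over the already-registered hctxs.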