Currently, the block debugfs attributes (tags, tags_bitmap, sched_tags, and
sched_tags_bitmap) are protected using q->sysfs_lock. However, these
attributes are updated in multiple scenarios:

- During the driver probe method
- During an elevator switch/update
- During an nr_hw_queues update
- When writing to the sysfs attribute nr_requests

All of these update paths (except the driver probe method, which doesn't
require any protection anyway) are already protected using q->elevator_lock.
So, to ensure consistency and proper synchronization, replace q->sysfs_lock
with q->elevator_lock for protecting these debugfs attributes.

Additionally, the debugfs attribute "busy" is currently unprotected. This
attribute iterates over all started requests in a tagset and prints them.
However, the tags can be updated concurrently via the sysfs attributes
"nr_requests" or "scheduler" (elevator switch), leading to potential race
conditions. Since the sysfs attributes "nr_requests" and "scheduler" are
already protected using q->elevator_lock, extend this protection to the
debugfs "busy" attribute as well.

This change ensures that all relevant debugfs attributes are properly
synchronized, preventing potential data inconsistencies.

Signed-off-by: Nilay Shroff <nilay@xxxxxxxxxxxxx>
---
Please note that this patch was unit tested against blktests and a quick
xfstests run.
---
 block/blk-mq-debugfs.c | 25 +++++++++++++++----------
 include/linux/blkdev.h |  6 +++---
 2 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index adf5f0697b6b..d26a6df945bd 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -347,11 +347,16 @@ static int hctx_busy_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
 	struct show_busy_params params = { .m = m, .hctx = hctx };
+	int res;
 
+	res = mutex_lock_interruptible(&hctx->queue->elevator_lock);
+	if (res)
+		goto out;
 	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
 			&params);
-
-	return 0;
+	mutex_unlock(&hctx->queue->elevator_lock);
+out:
+	return res;
 }
 
 static const char *const hctx_types[] = {
@@ -400,12 +405,12 @@ static int hctx_tags_show(void *data, struct seq_file *m)
 	struct request_queue *q = hctx->queue;
 	int res;
 
-	res = mutex_lock_interruptible(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->elevator_lock);
 	if (res)
 		goto out;
 	if (hctx->tags)
 		blk_mq_debugfs_tags_show(m, hctx->tags);
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->elevator_lock);
 
 out:
 	return res;
@@ -417,12 +422,12 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
 	struct request_queue *q = hctx->queue;
 	int res;
 
-	res = mutex_lock_interruptible(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->elevator_lock);
 	if (res)
 		goto out;
 	if (hctx->tags)
 		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->elevator_lock);
 
 out:
 	return res;
@@ -434,12 +439,12 @@ static int hctx_sched_tags_show(void *data, struct seq_file *m)
 	struct request_queue *q = hctx->queue;
 	int res;
 
-	res = mutex_lock_interruptible(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->elevator_lock);
 	if (res)
 		goto out;
 	if (hctx->sched_tags)
 		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->elevator_lock);
 
 out:
 	return res;
@@ -451,12 +456,12 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
	struct request_queue *q = hctx->queue;
 	int res;
 
-	res = mutex_lock_interruptible(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->elevator_lock);
 	if (res)
 		goto out;
 	if (hctx->sched_tags)
 		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->elevator_lock);
 
 out:
 	return res;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 22600420799c..709a32022c78 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -568,9 +568,9 @@ struct request_queue {
 	 * nr_requests and wbt latency, this lock also protects the sysfs attrs
 	 * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
 	 * may modify hctx tags, reserved-tags and cpumask, so this lock also
-	 * helps protect the hctx attrs. To ensure proper locking order during
-	 * an elevator or nr_hw_queue update, first freeze the queue, then
-	 * acquire ->elevator_lock.
+	 * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+	 * order during an elevator or nr_hw_queue update, first freeze the
+	 * queue, then acquire ->elevator_lock.
 	 */
 	struct mutex elevator_lock;
 
-- 
2.47.1
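
For reviewers who prefer seeing the end result rather than the hunk, below is a
sketch of hctx_busy_show() as it should read with this patch applied. It is
reconstructed from the diff above, so indentation and surrounding context are
approximate:

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };
	int res;

	/* Hold elevator_lock so tags can't be updated while we iterate. */
	res = mutex_lock_interruptible(&hctx->queue->elevator_lock);
	if (res)
		goto out;
	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
			&params);
	mutex_unlock(&hctx->queue->elevator_lock);
out:
	return res;
}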