Firstly, the code becomes cleaner by switching from a plain array to an xarray.

Secondly, the use-after-free on q->queue_hw_ctx can be fixed: queue_for_each_hw_ctx() may run while an update of nr_hw_queues is in progress, and blk_mq_realloc_hw_ctxs() reallocates and frees the old queue_hw_ctx array during that update.

With this patch, q->hctx_table is defined as an xarray which shares its lifetime with the request queue, so queue_for_each_hw_ctx() can use q->hctx_table to look up hctx reliably.

Reported-by: Yu Kuai <yukuai3@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-tag.c     |  2 +-
 block/blk-mq.c         | 48 +++++++++++++++++-------------------------
 include/linux/blk-mq.h |  4 ++--
 include/linux/blkdev.h |  2 +-
 4 files changed, 23 insertions(+), 33 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 0fd409b8e86e..eb33667d63b2 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -498,7 +498,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 		void *priv)
 {
 	/*
-	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
 	 * while the queue is frozen. So we can use q_usage_counter to avoid
 	 * racing with it.
 	 */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d01c1693a3d4..273137dc3ffd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3437,6 +3437,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 
 	blk_mq_remove_cpuhp(hctx);
 
+	xa_erase(&q->hctx_table, hctx_idx);
+
 	spin_lock(&q->unused_hctx_lock);
 	list_add(&hctx->hctx_list, &q->unused_hctx_list);
 	spin_unlock(&q->unused_hctx_lock);
@@ -3476,8 +3478,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
 				hctx->numa_node))
 		goto exit_hctx;
+
+	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
+		goto exit_flush_rq;
+
 	return 0;
 
+ exit_flush_rq:
+	if (set->ops->exit_request)
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -3855,7 +3864,7 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);
 
 	/*
 	 * release .mq_kobj and sw queue's kobject now because
@@ -3945,45 +3954,26 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 						struct request_queue *q)
 {
 	int i, j, end;
-	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
-
-	if (q->nr_hw_queues < set->nr_hw_queues) {
-		struct blk_mq_hw_ctx **new_hctxs;
-
-		new_hctxs = kcalloc_node(set->nr_hw_queues,
-				       sizeof(*new_hctxs), GFP_KERNEL,
-				       set->numa_node);
-		if (!new_hctxs)
-			return;
-		if (hctxs)
-			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
-			       sizeof(*hctxs));
-		q->queue_hw_ctx = new_hctxs;
-		kfree(hctxs);
-		hctxs = new_hctxs;
-	}
 
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
-		struct blk_mq_hw_ctx *old_hctx = hctxs[i];
+		struct blk_mq_hw_ctx *old_hctx = blk_mq_get_hctx(q, i);
 
 		if (old_hctx) {
 			old_node = old_hctx->numa_node;
 			blk_mq_exit_hctx(q, set, old_hctx, i);
 		}
 
-		hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
-		if (!hctxs[i]) {
+		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
 			if (!old_hctx)
 				break;
 			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
 					node, old_node);
-			hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
-					old_node);
-			WARN_ON_ONCE(!hctxs[i]);
+			WARN_ON_ONCE(!blk_mq_alloc_and_init_hctx(set, q, i,
+					old_node));
 		}
 	}
 	/*
@@ -4000,12 +3990,10 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	}
 
 	for (; j < end; j++) {
-		struct blk_mq_hw_ctx *hctx = hctxs[j];
+		struct blk_mq_hw_ctx *hctx = blk_mq_get_hctx(q, j);
 
-		if (hctx) {
+		if (hctx)
 			blk_mq_exit_hctx(q, set, hctx, j);
-			hctxs[j] = NULL;
-		}
 	}
 	mutex_unlock(&q->sysfs_lock);
 }
@@ -4045,6 +4033,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->unused_hctx_list);
 	spin_lock_init(&q->unused_hctx_lock);
 
+	xa_init(&q->hctx_table);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -4074,7 +4064,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);
 	q->nr_hw_queues = 0;
 	blk_mq_sysfs_deinit(q);
 err_poll:
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3d949b1968f3..b102ff64e56a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -919,12 +919,12 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 static inline struct blk_mq_hw_ctx *blk_mq_get_hctx(struct request_queue *q,
 		unsigned int hctx_idx)
 {
-	return q->queue_hw_ctx[hctx_idx];
+	return xa_load(&q->hctx_table, hctx_idx);
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
-	     ({ hctx = blk_mq_get_hctx((q), (i)); 1; }); (i)++)
+	     (hctx = blk_mq_get_hctx((q), (i))); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f757f9c2871f..a53ae40aaded 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -355,7 +355,7 @@ struct request_queue {
 	unsigned int		queue_depth;
 
 	/* hw dispatch queues */
-	struct blk_mq_hw_ctx	**queue_hw_ctx;
+	struct xarray		hctx_table;
 	unsigned int		nr_hw_queues;
 
 	/*
-- 
2.31.1
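
For reference, below is a minimal, hypothetical sketch (not part of the patch) of how a caller walks the hardware queues after this change; the helper name example_count_hctxs() is made up, and it only relies on interfaces visible above (blk_mq_get_hctx() backed by xa_load(), and the reworked queue_for_each_hw_ctx()). Note that the new macro stops at the first index whose lookup returns NULL, whereas the old ({ ...; 1; }) form always walked all nr_hw_queues slots:

#include <linux/blk-mq.h>

/* Illustrative example only, not part of the patch. */
static unsigned int example_count_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i, nr = 0;

	/*
	 * blk_mq_get_hctx() is now an xa_load() on q->hctx_table, which
	 * returns NULL for an index that is not (or no longer) populated,
	 * so the loop below simply terminates early instead of indexing
	 * into a queue_hw_ctx array that may have been reallocated.
	 */
	queue_for_each_hw_ctx(q, hctx, i)
		nr++;

	return nr;
}

Because q->hctx_table shares the request queue's lifetime (xa_destroy() happens only in blk_mq_release() or on the init error path), the lookup itself can no longer touch freed storage the way indexing a reallocated queue_hw_ctx array could.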