It has been reported many times that a use-after-free can be
intermittently found when iterating busy requests:

- https://lore.kernel.org/linux-block/8376443a-ec1b-0cef-8244-ed584b96fa96@xxxxxxxxxx/
- https://lore.kernel.org/linux-block/5c3ac5af-ed81-11e4-fee3-f92175f14daf@xxxxxxx/T/#m6c1ac11540522716f645d004e2a5a13c9f218908
- https://lore.kernel.org/linux-block/04e2f9e8-79fa-f1cb-ab23-4a15bf3f64cc@xxxxxxxxx/

The issue is that when we switch scheduler or change queue depth,
references to stale requests may be left in the driver tagset.

As a solution, clean up any references to those requests in the driver
tagset when they are freed. This is done with a cmpxchg so that a race
with another queue concurrently setting a driver tagset entry is handled
safely.

Signed-off-by: John Garry <john.garry@xxxxxxxxxx>
---
 block/blk-mq-sched.c |  2 +-
 block/blk-mq-tag.c   |  2 +-
 block/blk-mq.c       | 20 ++++++++++++++++++--
 block/blk-mq.h       |  2 ++
 4 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ddb65e9e6fd9..bc19bd8f8c7b 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -615,7 +615,7 @@ void blk_mq_sched_free_requests(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->sched_tags)
-			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+			blk_mq_free_rqs_ext(q->tag_set, hctx->sched_tags, i, hctx->tags);
 	}
 }
 
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ce813b909339..7ff1b20d58e7 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -580,7 +580,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 			return -ENOMEM;
 		}
 
-		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
+		blk_mq_free_rqs_ext(set, *tagsptr, hctx->queue_num, hctx->tags);
 		blk_mq_free_rq_map(*tagsptr, flags);
 		*tagsptr = new;
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d4d7c1caa439..9cb60bf7ac24 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2286,8 +2286,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 	return BLK_QC_T_NONE;
 }
 
-void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-		     unsigned int hctx_idx)
+void __blk_mq_free_rqs_ext(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx, struct blk_mq_tags *ref_tags)
 {
 	struct page *page;
 
@@ -2296,10 +2296,14 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
 		for (i = 0; i < tags->nr_tags; i++) {
 			struct request *rq = tags->static_rqs[i];
+			int j;
 
 			if (!rq)
 				continue;
 			set->ops->exit_request(set, rq, hctx_idx);
+			/* clean up any references which occur in @ref_tags */
+			for (j = 0; ref_tags && j < ref_tags->nr_tags; j++)
+				cmpxchg(&ref_tags->rqs[j], rq, 0);
 			tags->static_rqs[i] = NULL;
 		}
 	}
@@ -2316,6 +2320,18 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	}
 }
 
+void blk_mq_free_rqs_ext(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx, struct blk_mq_tags *ref_tags)
+{
+	__blk_mq_free_rqs_ext(set, tags, hctx_idx, ref_tags);
+}
+
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx)
+{
+	__blk_mq_free_rqs_ext(set, tags, hctx_idx, NULL);
+}
+
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
 {
 	kfree(tags->rqs);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 3616453ca28c..031e29f74926 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -53,6 +53,8 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  */
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx);
+void blk_mq_free_rqs_ext(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx, struct blk_mq_tags *references);
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 					unsigned int hctx_idx,
-- 
2.26.2
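As an aside for reviewers unfamiliar with the pattern, here is a minimal
stand-alone userspace sketch of the clearing scheme in the blk-mq.c hunk
(not kernel code; the names fake_tags, clear_stale_refs etc. are invented
for illustration, and C11 atomics stand in for the kernel's cmpxchg()):
each lookup-table entry that still points at the request being freed is
swapped back to NULL, while an entry that a racing context has already
repointed at a different request is left untouched.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define NR_TAGS 4

struct fake_request {
	int tag;
};

/* Stand-in for the driver tagset's rqs[] lookup table. */
struct fake_tags {
	_Atomic(struct fake_request *) rqs[NR_TAGS];
	unsigned int nr_tags;
};

/*
 * Clear every reference to @rq in @ref_tags. The compare-exchange only
 * NULLs entries that still point at @rq, so an entry that a concurrent
 * writer has already repointed at a live request is left alone.
 */
static void clear_stale_refs(struct fake_tags *ref_tags, struct fake_request *rq)
{
	unsigned int j;

	for (j = 0; ref_tags && j < ref_tags->nr_tags; j++) {
		struct fake_request *expected = rq;

		atomic_compare_exchange_strong(&ref_tags->rqs[j], &expected,
					       (struct fake_request *)NULL);
	}
}

int main(void)
{
	struct fake_request stale = { .tag = 1 }, live = { .tag = 2 };
	struct fake_tags tags = { .nr_tags = NR_TAGS };
	unsigned int j;

	atomic_store(&tags.rqs[0], &stale);	/* stale reference to clear */
	atomic_store(&tags.rqs[2], &live);	/* unrelated entry, must survive */

	clear_stale_refs(&tags, &stale);

	for (j = 0; j < tags.nr_tags; j++) {
		struct fake_request *r = atomic_load(&tags.rqs[j]);

		printf("rqs[%u] = %s\n", j,
		       !r ? "NULL" : (r == &live ? "live" : "stale"));
	}
	return 0;
}

A plain store of NULL would not do here, since it could wipe out an entry
that another context has just repointed at a live request; the
compare-exchange only clears entries that still reference the request
being freed.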