blk_exit_queue() frees elevator_data while blk_mq_run_work_fn() can still
access it. Cancel hctx->run_work before blk_exit_queue() is called to avoid
the use-after-free.

Fixes: 1b97871b501f ("blk-mq: move cancel of hctx->run_work into blk_mq_hw_sysfs_release")
Signed-off-by: Yang Yang <yang.yang@xxxxxxxx>
---
 block/blk-mq-sysfs.c | 2 --
 block/blk-sysfs.c    | 9 ++++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 062229395a50..7b52e7657b2d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
 
-	cancel_delayed_work_sync(&hctx->run_work);
-
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		cleanup_srcu_struct(hctx->srcu);
 	blk_free_flush_queue(hctx->fq);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 7dda709f3ccb..8c6bafc801dd 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -934,9 +934,16 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	if (queue_is_mq(q))
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
 		cancel_delayed_work_sync(&q->requeue_work);
 
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->run_work);
+	}
+
 	blk_exit_queue(q);
 
 	blk_queue_free_zone_bitmaps(q);
-- 
2.17.1
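
For context, the sketch below is a minimal userspace analogue of the ordering
this patch enforces; it is not kernel code, and every name in it
(elevator_stub, run_worker, ed) is invented for illustration. A worker thread
stands in for blk_mq_run_work_fn and keeps dereferencing state that teardown
is about to free; stopping and joining the worker before the free corresponds
to cancelling hctx->run_work before blk_exit_queue() runs.

/*
 * Userspace sketch only, not kernel code.  The worker thread stands in for
 * blk_mq_run_work_fn and the heap object for elevator_data; all names here
 * are invented for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct elevator_stub {
	int nr_runs;
};

static struct elevator_stub *ed;
static atomic_bool stop_worker;

/* Plays the role of the queued run_work: touches ed until told to stop. */
static void *run_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_worker)) {
		ed->nr_runs++;		/* would be a use-after-free if ed were freed first */
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	ed = calloc(1, sizeof(*ed));
	if (!ed)
		return 1;
	if (pthread_create(&worker, NULL, run_worker, NULL) != 0) {
		free(ed);
		return 1;
	}

	usleep(10000);

	/* "cancel_delayed_work_sync(&hctx->run_work)": stop and wait for the worker... */
	atomic_store(&stop_worker, true);
	pthread_join(worker, NULL);

	/* ...and only then free what it was touching (the "blk_exit_queue" step). */
	printf("worker ran %d times before teardown\n", ed->nr_runs);
	free(ed);
	return 0;
}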