There is a problem that nbd_handle_reply() might access a freed request:

1) At first, a normal io is submitted and completed with a scheduler:

internal_tag = blk_mq_get_tag -> get tag from sched_tags
 blk_mq_rq_ctx_init
  sched_tags->rq[internal_tag] = sched_tags->static_rq[internal_tag]
...
blk_mq_get_driver_tag
 __blk_mq_get_driver_tag -> get tag from tags
 tags->rq[tag] = sched_tags->static_rq[internal_tag]

So, both tags->rq[tag] and sched_tags->rq[internal_tag] point to the
request sched_tags->static_rq[internal_tag], even after the io is
finished.

2) The nbd server sends a reply with a random tag directly:

recv_work
 nbd_handle_reply
  blk_mq_tag_to_rq(tags, tag)
   rq = tags->rq[tag]

3) sched_tags->static_rq is freed:

blk_mq_sched_free_requests
 blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i)
  -> step 2) accesses rq before the rq mapping is cleared
  blk_mq_clear_rq_mapping(set, tags, hctx_idx);
  __free_pages() -> rq is freed here

4) Then, nbd continues to use the freed request in nbd_handle_reply().

Fix the problem by getting 'q_usage_counter' before blk_mq_tag_to_rq(),
which ensures the request cannot be freed as long as 'q_usage_counter'
is non-zero.

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/blk-core.c    |  1 +
 drivers/block/nbd.c | 19 ++++++++++++++++++-
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 5454db2fa263..2008e6903166 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -489,6 +489,7 @@ void blk_queue_exit(struct request_queue *q)
 {
 	percpu_ref_put(&q->q_usage_counter);
 }
+EXPORT_SYMBOL(blk_queue_exit);
 
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bbf8ebe74..f065afcc7586 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -824,6 +824,7 @@ static void recv_work(struct work_struct *work)
 						     work);
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
+	struct request_queue *q = nbd->disk->queue;
 	struct nbd_sock *nsock;
 	struct nbd_cmd *cmd;
 	struct request *rq;
@@ -834,13 +835,29 @@ static void recv_work(struct work_struct *work)
 		if (nbd_read_reply(nbd, args->index, &reply))
 			break;
 
+		/*
+		 * Getting q_usage_counter prevents accessing a freed request
+		 * through blk_mq_tag_to_rq() in nbd_handle_reply(). If
+		 * q_usage_counter is zero, no request is inflight, which
+		 * means something is wrong since we expect to find a request
+		 * to complete here.
+		 */
+		if (!percpu_ref_tryget(&q->q_usage_counter)) {
+			dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
+				__func__);
+			break;
+		}
+
 		cmd = nbd_handle_reply(nbd, args->index, &reply);
-		if (IS_ERR(cmd))
+		if (IS_ERR(cmd)) {
+			blk_queue_exit(q);
 			break;
+		}
 
 		rq = blk_mq_rq_from_pdu(cmd);
 		if (likely(!blk_should_fake_timeout(rq->q)))
 			blk_mq_complete_request(rq);
+		blk_queue_exit(q);
 	}
 
 	nsock = config->socks[args->index];
-- 
2.31.1
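
Not part of the patch itself: below is a minimal userspace sketch of the
pattern the change relies on, i.e. take a reference before looking up the
request table and drop it only after completion. It models the real
percpu_ref 'q_usage_counter' with a plain C11 atomic counter, and all names
in it (io_ctx, io_tryget, io_put, handle_reply) are invented for
illustration only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct io_ctx {
	atomic_int usage;		/* stands in for q->q_usage_counter */
	const char *rq_table[4];	/* stands in for tags->rq[] */
};

/* Succeeds only while the counter has not dropped to zero. */
static bool io_tryget(struct io_ctx *ctx)
{
	int old = atomic_load(&ctx->usage);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&ctx->usage, &old, old + 1))
			return true;
	}
	return false;	/* counter is zero: requests may already be freed */
}

static void io_put(struct io_ctx *ctx)
{
	atomic_fetch_sub(&ctx->usage, 1);
}

/* Mirrors recv_work(): only touch the request table under a reference. */
static void handle_reply(struct io_ctx *ctx, unsigned int tag)
{
	if (!io_tryget(ctx)) {
		fprintf(stderr, "no io inflight, dropping reply for tag %u\n",
			tag);
		return;
	}

	printf("completing tag %u -> %s\n", tag, ctx->rq_table[tag]);
	io_put(ctx);
}

int main(void)
{
	struct io_ctx ctx = {
		.rq_table = { "rq0", "rq1", "rq2", "rq3" },
	};

	atomic_init(&ctx.usage, 1);	/* initial reference, queue is live */

	handle_reply(&ctx, 2);		/* reference taken: lookup is safe */

	atomic_store(&ctx.usage, 0);	/* queue drained: requests gone */
	handle_reply(&ctx, 2);		/* tryget fails: no use-after-free */

	return 0;
}

In the kernel, the same roles are played by
percpu_ref_tryget(&q->q_usage_counter) and blk_queue_exit(), which is why
the latter is exported by this patch.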