blk-mq uses the static mapping between sw queues and hw queues to pick a
hw queue, then allocates the request from that hw queue's request pool.
blk_mq_alloc_request_hctx() instead requires the caller to pass one hctx
index and asks blk-mq to allocate the request from that specific hctx's
request pool. This interface is quite fragile given that the hctx can
become inactive at any time because of CPU hotplug, and kernel oopses on
NVMe FC/LOOP/RDMA/TCP have been reported several times for exactly this
reason.

The only users are the NVMe loop, FC, RDMA and TCP drivers, which call
it for submitting the connect command. All of these drivers have been
converted to the generic blk_mq_alloc_request() API for allocating the
connect command's request, so kill blk_mq_alloc_request_hctx() now.

Cc: James Smart <james.smart@xxxxxxxxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c         | 46 ------------------------------------------
 include/linux/blk-mq.h |  3 ---
 2 files changed, 49 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5c9adcaa27ac..a360fe70ec98 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -427,52 +427,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
-{
-	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
-	struct request *rq;
-	unsigned int cpu;
-	int ret;
-
-	/*
-	 * If the tag allocator sleeps we could get an allocation for a
-	 * different hardware context. No need to complicate the low level
-	 * allocator for this for the rare use case of a command tied to
-	 * a specific queue.
-	 */
-	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
-		return ERR_PTR(-EINVAL);
-
-	if (hctx_idx >= q->nr_hw_queues)
-		return ERR_PTR(-EIO);
-
-	ret = blk_queue_enter(q, flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/*
-	 * Check if the hardware context is actually mapped to anything.
-	 * If not tell the caller that it should skip this queue.
-	 */
-	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
-	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EXDEV);
-	}
-	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
-	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
-
-	rq = blk_mq_get_request(q, NULL, &alloc_data);
-	blk_queue_exit(q);
-
-	if (!rq)
-		return ERR_PTR(-EWOULDBLOCK);
-
-	return rq;
-}
-EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
-
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index dc03e059fdff..a0c65de93e8c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -441,9 +441,6 @@ enum {
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		blk_mq_req_flags_t flags);
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-		unsigned int op, blk_mq_req_flags_t flags,
-		unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 enum {
-- 
2.20.1
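
For readers following the conversion mentioned above, a minimal sketch
of what the driver-side change looks like. This is illustrative only,
not the exact diff from any of the converted drivers: it assumes a
request_queue *q for the fabrics connect queue, a fixed hw queue index
qid, and uses REQ_OP_DRV_OUT as a stand-in for the op the driver would
actually build from the connect command:

	struct request *rq;

	/*
	 * Before: pin the connect command's request to hw queue 'qid'.
	 * If CPU hotplug has left that hctx without any online CPU,
	 * this is exactly the path that oopses.
	 */
	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
				       BLK_MQ_REQ_NOWAIT, qid);

	/*
	 * After: plain allocation. blk-mq maps the submitting CPU's
	 * sw queue to a live hw queue via the static queue mapping,
	 * so an inactive hctx can no longer be picked.
	 */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

The point of the conversion is that the connect command no longer needs
to be tied to a particular hctx at allocation time, which is what made
the removed helper unsafe across CPU hotplug.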