None of the four host drivers uses a managed irq for completing requests, so it is correct to pass this flag to blk-mq.

Secondly, with this flag set, blk-mq will dispatch the connect request allocated via blk_mq_alloc_request_hctx() to the driver even when all CPUs in the specified hctx's cpumask are offline.

Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Cc: Daniel Wagner <dwagner@xxxxxxx>
Cc: Wen Xiong <wenxiong@xxxxxxxxxx>
Cc: John Garry <john.garry@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 drivers/nvme/host/fc.c     | 3 ++-
 drivers/nvme/host/rdma.c   | 3 ++-
 drivers/nvme/host/tcp.c    | 3 ++-
 drivers/nvme/target/loop.c | 3 ++-
 4 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 256e87721a01..c563a2b6e9fc 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2876,7 +2876,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+		BLK_MQ_F_NOT_USE_MANAGED_IRQ;
 	ctrl->tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
 			    ctrl->lport->ops->fcprqst_priv_sz);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37943dc4c2c1..4b7bdc829109 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -817,7 +817,8 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->queue_depth = nctrl->sqsize + 1;
 		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
-		set->flags = BLK_MQ_F_SHOULD_MERGE;
+		set->flags = BLK_MQ_F_SHOULD_MERGE |
+			BLK_MQ_F_NOT_USE_MANAGED_IRQ;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
 				NVME_RDMA_DATA_SGL_SIZE;
 		if (nctrl->max_integrity_segments)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 34f4b3402f7c..0125463b7d77 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1600,7 +1600,8 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->queue_depth = nctrl->sqsize + 1;
 		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
-		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING |
+			BLK_MQ_F_NOT_USE_MANAGED_IRQ;
 		set->cmd_size = sizeof(struct nvme_tcp_request);
 		set->driver_data = ctrl;
 		set->nr_hw_queues = nctrl->queue_count - 1;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index cb30cb942e1d..bf032249e010 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -524,7 +524,8 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+		BLK_MQ_F_NOT_USE_MANAGED_IRQ;
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
 		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 	ctrl->tag_set.driver_data = ctrl;
-- 
2.31.1
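
For context, the connect request mentioned in the changelog is allocated
against a specific hw queue via blk_mq_alloc_request_hctx(), roughly as in
the sketch below. The function name and the omitted command setup are
illustrative, not taken from this series; only the blk-mq calls are real.

#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Sketch only (not part of this patch): allocate the fabrics connect
 * request on a specific hw queue.  With BLK_MQ_F_NOT_USE_MANAGED_IRQ set
 * on the tag_set, blk-mq may dispatch this request even when every CPU in
 * the hctx's cpumask is offline, because completion comes from the
 * transport rather than from a managed interrupt.
 */
static int sketch_connect_io_queue(struct request_queue *connect_q,
				   unsigned int qid)
{
	struct request *req;

	/* Pin the request to hw queue qid - 1 (qid 0 is the admin queue). */
	req = blk_mq_alloc_request_hctx(connect_q, REQ_OP_DRV_OUT,
					BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
					qid - 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... build the connect command and execute it synchronously ... */

	blk_mq_free_request(req);
	return 0;
}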