The current blk_mq_quiesce_queue() and blk_mq_unquiesce_queue() always
stop and start the queue unconditionally. There can be concurrent
quiesce/unquiesce calls coming from different, unrelated code paths, so
an unquiesce may arrive unexpectedly and start the queue too early.

Prepare for supporting concurrent quiesce/unquiesce from multiple
contexts, so that we can address the above issue.

NVMe has a very complicated quiesce/unquiesce usage pattern, so add an
atomic bit per queue to make sure that blk-mq quiesce/unquiesce is
always called in pairs.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 drivers/nvme/host/core.c | 12 ++++++++----
 drivers/nvme/host/nvme.h |  2 ++
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 23fb746a8970..4391760e2e9a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4452,12 +4452,14 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
 
 static void nvme_start_ns_queue(struct nvme_ns *ns)
 {
-	blk_mq_unquiesce_queue(ns->queue);
+	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
+		blk_mq_unquiesce_queue(ns->queue);
 }
 
 static void nvme_stop_ns_queue(struct nvme_ns *ns)
 {
-	blk_mq_quiesce_queue(ns->queue);
+	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
+		blk_mq_quiesce_queue(ns->queue);
 }
 
 /*
@@ -4575,13 +4577,15 @@ EXPORT_SYMBOL_GPL(nvme_start_queues);
 
 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
 {
-	blk_mq_quiesce_queue(ctrl->admin_q);
+	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+		blk_mq_quiesce_queue(ctrl->admin_q);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
 
 void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
 {
-	blk_mq_unquiesce_queue(ctrl->admin_q);
+	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+		blk_mq_unquiesce_queue(ctrl->admin_q);
 }
 EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 47877a5f1515..0fa6fa22f088 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -336,6 +336,7 @@ struct nvme_ctrl {
 	int nr_reconnects;
 	unsigned long flags;
 #define NVME_CTRL_FAILFAST_EXPIRED	0
+#define NVME_CTRL_ADMIN_Q_STOPPED	1
 	struct nvmf_ctrl_options *opts;
 
 	struct page *discard_page;
@@ -457,6 +458,7 @@ struct nvme_ns {
 #define NVME_NS_ANA_PENDING	2
 #define NVME_NS_FORCE_RO	3
 #define NVME_NS_READY		4
+#define NVME_NS_STOPPED		5
 
 	struct cdev cdev;
 	struct device cdev_device;
-- 
2.31.1
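
For reference only (not part of the patch): a minimal userspace sketch of
the pairing idea above, using C11 atomics in place of the kernel's
test_and_set_bit()/test_and_clear_bit(). The stop_queue()/start_queue()
and fake_*() names are illustrative placeholders, not kernel APIs.

/*
 * Sketch: an atomic "stopped" flag makes stop/start idempotent, so
 * repeated or concurrent calls quiesce/unquiesce the queue only once,
 * i.e. quiesce and unquiesce effectively end up called in pairs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stopped;

static void fake_quiesce(void)   { puts("blk_mq_quiesce_queue()"); }
static void fake_unquiesce(void) { puts("blk_mq_unquiesce_queue()"); }

static void stop_queue(void)
{
	/* Only the caller that flips 0 -> 1 actually quiesces. */
	if (!atomic_exchange(&stopped, true))
		fake_quiesce();
}

static void start_queue(void)
{
	/* Only the caller that flips 1 -> 0 actually unquiesces. */
	if (atomic_exchange(&stopped, false))
		fake_unquiesce();
}

int main(void)
{
	stop_queue();	/* quiesces */
	stop_queue();	/* no-op: flag already set */
	start_queue();	/* unquiesces */
	start_queue();	/* no-op: flag already clear */
	return 0;
}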