This helper is pretty pointless now, and also in the way of per-tagset
quiesce.
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
drivers/nvme/host/core.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7fdb744979c..0ab3a18fd9f85 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -5104,17 +5104,6 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
blk_mq_wait_quiesce_done(ns->queue->tag_set);
}
-/*
- * Prepare a queue for teardown.
- *
- * This must forcibly unquiesce queues to avoid blocking dispatch.
- */
-static void nvme_set_queue_dying(struct nvme_ns *ns)
-{
- blk_mark_disk_dead(ns->disk);
- nvme_start_ns_queue(ns);
-}
-
/**
* nvme_kill_queues(): Ends all namespace queues
* @ctrl: the dead controller that needs to end
@@ -5130,10 +5119,11 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
nvme_start_admin_queue(ctrl);
-
if (!test_and_set_bit(NVME_CTRL_NS_DEAD, &ctrl->flags)) {
- list_for_each_entry(ns, &ctrl->namespaces, list)
- nvme_set_queue_dying(ns);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ blk_mark_disk_dead(ns->disk);
+ nvme_start_ns_queue(ns);
+ }
I have to say that I always found the nvme_kill_queues interface somewhat
odd: it is a core function that unquiesces the admin/io queues on the
assumption that the driver stopped them at some point.
Now that there is no dependency between the unquiesce and
blk_mark_disk_dead(), maybe it would be a good idea to move the
unquiescing into the drivers, where it can be paired with the quiesce
itself, and rename this to nvme_mark_namespaces_dead() or something?
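
To make that concrete, here is a rough sketch of what the split could look
like. This is not a real patch: nvme_mark_namespaces_dead() is only the name
suggested above, the namespaces_rwsem locking and the driver-side calls are
assumptions based on my reading of the surrounding code, and I left the
NVME_CTRL_NS_DEAD guard out for brevity.

/* Core helper: only mark the namespaces dead, no unquiescing here. */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);     /* assumed to protect ctrl->namespaces */
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mark_disk_dead(ns->disk);
        up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);

/*
 * Hypothetical driver-side pairing, e.g. in a teardown path that quiesced
 * the queues itself earlier:
 *
 *      nvme_mark_namespaces_dead(&dev->ctrl);
 *      nvme_start_admin_queue(&dev->ctrl);
 *      nvme_start_queues(&dev->ctrl);
 */

Whether the NVME_CTRL_NS_DEAD guard would stay in the core helper or move to
the callers is a separate question; the point is only that the unquiesce ends
up next to the quiesce it undoes.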