On 5/20/20 4:56 AM, Ming Lei wrote:
> While waiting for in-flight IO completion in the reset handler, a timeout
> or controller failure may still happen; the controller is then deleted
> and all in-flight IOs are failed. This is too heavy-handed.
>
> Improve the reset handling by replacing nvme_wait_freeze with a query
> and check of the controller. If all ns queues are frozen, the controller
> was reset successfully; otherwise check whether the controller has been
> disabled. If so, break out of the current recovery and schedule a fresh
> reset.
>
> This avoids failing IO and removing the controller unnecessarily.
>
> Cc: Christoph Hellwig <hch@xxxxxx>
> Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
> Cc: Keith Busch <kbusch@xxxxxxxxxx>
> Cc: Max Gurtovoy <maxg@xxxxxxxxxxxx>
> Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> ---
>  drivers/nvme/host/pci.c | 37 ++++++++++++++++++++++++++++++-------
>  1 file changed, 30 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index ce0d1e79467a..b5aeed33a634 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -24,6 +24,7 @@
>  #include <linux/io-64-nonatomic-lo-hi.h>
>  #include <linux/sed-opal.h>
>  #include <linux/pci-p2pdma.h>
> +#include <linux/delay.h>
>
>  #include "trace.h"
>  #include "nvme.h"
> @@ -1235,9 +1236,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
>  	 * shutdown, so we return BLK_EH_DONE.
>  	 */
>  	switch (dev->ctrl.state) {
> -	case NVME_CTRL_CONNECTING:
> -		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
> -		/* fall through */
>  	case NVME_CTRL_DELETING:
>  		dev_warn_ratelimited(dev->ctrl.device,
>  			 "I/O %d QID %d timeout, disable controller\n",
> @@ -2393,7 +2391,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
>  	u32 csts = readl(dev->bar + NVME_REG_CSTS);
>
>  	if (dev->ctrl.state == NVME_CTRL_LIVE ||
> -	    dev->ctrl.state == NVME_CTRL_RESETTING) {
> +	    dev->ctrl.state == NVME_CTRL_RESETTING ||
> +	    dev->ctrl.state == NVME_CTRL_CONNECTING) {
>  		freeze = true;
>  		nvme_start_freeze(&dev->ctrl);
>  	}
> @@ -2504,12 +2503,29 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
>  	nvme_put_ctrl(&dev->ctrl);
>  }
>
> +static bool nvme_wait_freeze_and_check(struct nvme_dev *dev)
> +{
> +	bool frozen;
> +
> +	while (true) {
> +		frozen = nvme_frozen(&dev->ctrl);
> +		if (frozen)
> +			break;
> +		if (!dev->online_queues)
> +			break;
> +		msleep(5);
> +	}
> +
> +	return frozen;
> +}
> +
>  static void nvme_reset_work(struct work_struct *work)
>  {
>  	struct nvme_dev *dev =
>  		container_of(work, struct nvme_dev, ctrl.reset_work);
>  	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
>  	int result;
> +	bool reset_done = true;
>
>  	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
>  		result = -ENODEV;
> @@ -2606,8 +2622,9 @@ static void nvme_reset_work(struct work_struct *work)
>  		nvme_free_tagset(dev);
>  	} else {
>  		nvme_start_queues(&dev->ctrl);
> -		nvme_wait_freeze(&dev->ctrl);
> -		nvme_dev_add(dev);
> +		reset_done = nvme_wait_freeze_and_check(dev);

Once we arrive here, it indicates "dev->online_queues >= 2".
2601         if (dev->online_queues < 2) {
2602                 dev_warn(dev->ctrl.device, "IO queues not created\n");
2603                 nvme_kill_queues(&dev->ctrl);
2604                 nvme_remove_namespaces(&dev->ctrl);
2605                 nvme_free_tagset(dev);
2606         } else {
2607                 nvme_start_queues(&dev->ctrl);
2608                 nvme_wait_freeze(&dev->ctrl);
2609                 nvme_dev_add(dev);
2610                 nvme_unfreeze(&dev->ctrl);
2611         }

Is there any reason to check "if (!dev->online_queues)" in
nvme_wait_freeze_and_check()?

Thank you very much!

Dongli Zhang

> +		if (reset_done)
> +			nvme_dev_add(dev);
>  		nvme_unfreeze(&dev->ctrl);
>  	}
>
> @@ -2622,7 +2639,13 @@ static void nvme_reset_work(struct work_struct *work)
>  		goto out;
>  	}
>
> -	nvme_start_ctrl(&dev->ctrl);
> +	/* New error happens during reset, so schedule a new reset */
> +	if (!reset_done) {
> +		dev_warn(dev->ctrl.device, "new error during reset\n");
> +		nvme_reset_ctrl(&dev->ctrl);
> +	} else {
> +		nvme_start_ctrl(&dev->ctrl);
> +	}
>  	return;
>
> out_unlock:
>
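P.S. To make the question above concrete, here is a minimal user-space sketch
of the loop being discussed. It is not driver code: fake_ctrl, queues_frozen()
and wait_freeze_and_check() are made-up stand-ins that only mirror the loop
shape of nvme_wait_freeze_and_check() in the patch, i.e. return true once
everything is frozen, and bail out early if online_queues has dropped to 0.

/*
 * Standalone sketch of the wait-and-check loop from the patch above.
 * fake_ctrl, queues_frozen() and wait_freeze_and_check() are invented
 * stand-ins so the two exit conditions (all queues frozen vs. controller
 * disabled while we wait) can be exercised in user space.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_ctrl {
	int online_queues;	/* 0 once the controller is disabled */
	int unfrozen_queues;	/* 0 once every queue is frozen */
};

/* Stand-in for nvme_frozen(): true when no queue is still unfrozen. */
static bool queues_frozen(struct fake_ctrl *ctrl)
{
	return ctrl->unfrozen_queues == 0;
}

/*
 * Same shape as nvme_wait_freeze_and_check() in the patch: poll until
 * either every queue is frozen (success) or the controller has been
 * disabled underneath us (online_queues dropped to 0), in which case we
 * stop waiting and report failure so the caller can schedule a fresh
 * reset instead of hanging here.
 */
static bool wait_freeze_and_check(struct fake_ctrl *ctrl)
{
	while (true) {
		if (queues_frozen(ctrl))
			return true;
		if (!ctrl->online_queues)
			return false;
		usleep(5000);		/* rough equivalent of msleep(5) */
	}
}

int main(void)
{
	struct fake_ctrl ok = { .online_queues = 2, .unfrozen_queues = 0 };
	struct fake_ctrl dead = { .online_queues = 0, .unfrozen_queues = 3 };

	printf("healthy controller: reset_done=%d\n", wait_freeze_and_check(&ok));
	printf("disabled mid-wait:  reset_done=%d\n", wait_freeze_and_check(&dead));
	return 0;
}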