BZ: 1948690 Upstream Status: RHEL-only Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx> rhel-8.git commit f8fb6ea1226e2abc525c88da13b346118d548eea Author: Mike Snitzer <snitzer@xxxxxxxxxx> Date: Tue Aug 25 21:52:46 2020 -0400 [nvme] nvme: update failover handling to work with REQ_FAILFAST_TRANSPORT Message-id: <20200825215248.2291-9-snitzer@xxxxxxxxxx> Patchwork-id: 325177 Patchwork-instance: patchwork O-Subject: [RHEL8.3 PATCH 08/10] nvme: update failover handling to work with REQ_FAILFAST_TRANSPORT Bugzilla: 1843515 RH-Acked-by: David Milburn <dmilburn@xxxxxxxxxx> RH-Acked-by: Gopal Tiwari <gtiwari@xxxxxxxxxx> RH-Acked-by: Ewan Milne <emilne@xxxxxxxxxx> BZ: 1843515 Upstream Status: RHEL-only If REQ_FAILFAST_TRANSPORT is set, it means the driver should not retry IO that completed with transport errors. REQ_FAILFAST_TRANSPORT is set by multipathing software (e.g. dm-multipath) before it issues IO. Update NVMe to prepare for failover of requests marked with either REQ_NVME_MPATH or REQ_FAILFAST_TRANSPORT. This allows such requests to be given a disposition of FAILOVER. Introduce nvme_end_req_with_failover() for use in nvme_complete_rq() if REQ_NVME_MPATH isn't set. nvme_end_req_with_failover() ensures the request is completed with a retryable IO error when appropriate. __nvme_end_req() was factored out for use by both nvme_end_req() and nvme_end_req_with_failover(). 
Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx> Signed-off-by: Frantisek Hrbata <fhrbata@xxxxxxxxxx> --- drivers/nvme/host/core.c | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) Index: linux-rhel9/drivers/nvme/host/core.c =================================================================== --- linux-rhel9.orig/drivers/nvme/host/core.c +++ linux-rhel9/drivers/nvme/host/core.c @@ -311,7 +311,7 @@ static inline enum nvme_disposition nvme nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if (req->cmd_flags & REQ_NVME_MPATH) { + if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) { if (nvme_is_path_error(nvme_req(req)->status) || blk_queue_dying(req->q)) return FAILOVER; @@ -323,10 +323,8 @@ static inline enum nvme_disposition nvme return RETRY; } -static inline void nvme_end_req(struct request *req) +static inline void __nvme_end_req(struct request *req, blk_status_t status) { - blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && req_op(req) == REQ_OP_ZONE_APPEND) req->__sector = nvme_lba_to_sect(req->q->queuedata, @@ -336,6 +334,28 @@ static inline void nvme_end_req(struct r blk_mq_end_request(req, status); } +static inline void nvme_end_req(struct request *req) +{ + __nvme_end_req(req, nvme_error_status(nvme_req(req)->status)); +} + +static inline void nvme_end_req_with_failover(struct request *req) +{ + u16 nvme_status = nvme_req(req)->status; + blk_status_t status = nvme_error_status(nvme_status); + + if (unlikely(nvme_status & NVME_SC_DNR)) + goto out; + + if (!blk_path_error(status)) { + pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n", + blk_status_to_errno(status)); + status = BLK_STS_IOERR; + } +out: + __nvme_end_req(req, status); +} + void nvme_complete_rq(struct request *req) { trace_nvme_complete_rq(req); @@ -352,7 +372,10 @@ void nvme_complete_rq(struct request *re nvme_retry_req(req); return; case 
FAILOVER: - nvme_failover_req(req); + if (req->cmd_flags & REQ_NVME_MPATH) + nvme_failover_req(req); + else + nvme_end_req_with_failover(req); return; } } -- dm-devel mailing list dm-devel@xxxxxxxxxx https://listman.redhat.com/mailman/listinfo/dm-devel