With nvme native multipath, if a path-related error occurs while queue_rq
calls the HBA driver to send a request, queue_rq returns BLK_STS_IOERR to
blk-mq. The request is then completed with BLK_STS_IOERR instead of failing
over to a retry. queue_rq needs to call nvme_complete_rq to complete the
request with NVME_SC_HOST_PATH_ERROR, so that the request will fail over and
be retried if needed.

Signed-off-by: Chao Leng <lengchao@xxxxxxxxxx>
---
 drivers/nvme/host/rdma.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cf6c49d09c82..5fb0838a5f8b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2030,6 +2030,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 	int err;
+	bool driver_error = false;
 
 	WARN_ON_ONCE(rq->tag < 0);
 
@@ -2077,8 +2078,10 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->mr ? &req->reg_wr.wr : NULL);
-	if (unlikely(err))
+	if (unlikely(err)) {
+		driver_error = true;
 		goto err_unmap;
+	}
 
 	return BLK_STS_OK;
 
@@ -2093,6 +2096,10 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 unmap_qe:
 	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
 			    DMA_TO_DEVICE);
+	if (driver_error && ret == BLK_STS_IOERR) {
+		nvme_complete_failed_req(rq);
+		ret = BLK_STS_OK;
+	}
 	return ret;
 }
-- 
2.16.4
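
Note: nvme_complete_failed_req() is not defined in this patch; it is assumed
to be the helper added earlier in this series. A minimal sketch of what such
a helper is expected to do, under that assumption (the name, location and
exact body here are illustrative, not the series code):

	/*
	 * Sketch only: assumed helper, e.g. in drivers/nvme/host/nvme.h.
	 * Complete a request that never reached the target with a host
	 * path error, so that nvme_complete_rq() can fail it over to
	 * another path instead of the caller seeing BLK_STS_IOERR.
	 */
	static inline void nvme_complete_failed_req(struct request *req)
	{
		nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
		blk_mq_set_request_complete(req);
		nvme_complete_rq(req);
	}

With a helper along these lines, the new branch in nvme_rdma_queue_rq()
completes the request itself and returns BLK_STS_OK, so blk-mq does not
complete it a second time with BLK_STS_IOERR.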