Set the maximal data transfer size (MDTS) according to device
capabilities. For example, T10-DIF offload with a supporting RDMA HCA
goes through the RDMA/rw API, which limits each I/O operation to a
single MR of at most 256 pages. Limit the MDTS according to the RDMA/rw
API, and decrease it further so that large I/Os are not split multiple
times by the local block layer, easing the CPU load on the target side.

Signed-off-by: Max Gurtovoy <maxg@xxxxxxxxxxxx>
---
 drivers/nvme/target/rdma.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 37d262a..2227adf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -30,6 +30,7 @@
 #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
 #define NVMET_RDMA_MAX_INLINE_SGE		4
 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE	max_t(int, SZ_16K, PAGE_SIZE)
+#define NVMET_RDMA_T10_PI_MDTS			5
 
 struct nvmet_rdma_cmd {
 	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
@@ -1602,6 +1603,21 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
 	}
 }
 
+static u8 nvmet_rdma_set_mdts(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_port *port = ctrl->port;
+	struct rdma_cm_id *cm_id = port->priv;
+	u32 max_pages;
+
+	if (ctrl->pi_support) {
+		max_pages = rdma_rw_fr_page_list_len(cm_id->device, true);
+		/* Assume mpsmin == device_page_size == 4KB */
+		return min(ilog2(max_pages), NVMET_RDMA_T10_PI_MDTS);
+	}
+
+	return 0;
+}
+
 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.owner			= THIS_MODULE,
 	.type			= NVMF_TRTYPE_RDMA,
@@ -1612,6 +1628,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
	.queue_response		= nvmet_rdma_queue_response,
 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
 	.disc_traddr		= nvmet_rdma_disc_port_addr,
+	.set_mdts		= nvmet_rdma_set_mdts,
 };
 
 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
-- 
1.8.3.1
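
[Note for readers, not part of the patch: MDTS is reported as a power of
two in units of the controller's minimum memory page size (CAP.MPSMIN),
so capping the exponent at 5 with 4KB pages yields 128KB per I/O. Below
is a minimal userspace sketch of the same calculation the patch performs
in nvmet_rdma_set_mdts(); ilog2_u32() stands in for the kernel's
ilog2(), and max_pages = 256 mirrors the single-MR limit cited in the
commit message.]

#include <stdio.h>
#include <stdint.h>

#define NVMET_RDMA_T10_PI_MDTS	5	/* cap: 2^5 pages = 128KB at 4KB pages */
#define PAGE_SIZE_4K		4096u	/* assumed mpsmin page size */

/* Integer log2, mirroring the kernel's ilog2() for powers of two. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t max_pages = 256;	/* single-MR page limit from the commit message */
	unsigned int mdts = ilog2_u32(max_pages);

	/* Cap the exponent, as the patch does with NVMET_RDMA_T10_PI_MDTS. */
	if (mdts > NVMET_RDMA_T10_PI_MDTS)
		mdts = NVMET_RDMA_T10_PI_MDTS;

	/* MDTS is a power of two in units of the minimum page size. */
	printf("mdts = %u -> max transfer = %u bytes (%u KB)\n",
	       mdts, (1u << mdts) * PAGE_SIZE_4K,
	       ((1u << mdts) * PAGE_SIZE_4K) / 1024);
	return 0;
}

[With these inputs, ilog2(256) = 8 exceeds the cap, so the sketch prints
"mdts = 5 -> max transfer = 131072 bytes (128 KB)", illustrating why the
patch deliberately returns a value smaller than the RDMA/rw limit.]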