From: Chuck Lever <chuck.lever@xxxxxxxxxx>

For upper layer protocols that request rw_ctxs, ib_create_qp()
adjusts ib_qp_init_attr::max_send_wr to accommodate the WQEs those
rw_ctxs will consume. See rdma_rw_init_qp() for details.

To actually use those additional WQEs, svc_rdma_accept() needs to
retrieve the corrected SQ depth after calling rdma_create_qp() and
set newxprt->sc_sq_depth and newxprt->sc_sq_avail so that
svc_rdma_send() and svc_rdma_post_chunk_ctxt() can utilize those
WQEs. The NVMe target driver, for example, already does this
properly.

Fixes: 26fb2254dd33 ("svcrdma: Estimate Send Queue depth properly")
Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)

diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4a038c7e86f9..75f1481fbca0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -370,12 +370,12 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
  */
 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 {
+	unsigned int ctxts, rq_depth, sq_depth;
 	struct svcxprt_rdma *listen_rdma;
 	struct svcxprt_rdma *newxprt = NULL;
 	struct rdma_conn_param conn_param;
 	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
-	unsigned int ctxts, rq_depth;
 	struct ib_device *dev;
 	int ret = 0;
 	RPC_IFDEBUG(struct sockaddr *sap);
@@ -422,24 +422,29 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		newxprt->sc_max_requests = rq_depth - 2;
 		newxprt->sc_max_bc_requests = 2;
 	}
+	sq_depth = rq_depth;
+
 	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
 	ctxts *= newxprt->sc_max_requests;
-	newxprt->sc_sq_depth = rq_depth + ctxts;
-	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
-		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
-	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
 
 	newxprt->sc_pd = ib_alloc_pd(dev, 0);
 	if (IS_ERR(newxprt->sc_pd)) {
 		trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
 		goto errout;
 	}
-	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
+
+	/* The Completion Queue depth is the maximum number of signaled
+	 * WRs expected to be in flight. Every Send WR is signaled, and
+	 * each rw_ctx has a chain of WRs, but only one WR in each chain
+	 * is signaled.
+	 */
+	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, sq_depth + ctxts,
 					    IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_sq_cq))
 		goto errout;
-	newxprt->sc_rq_cq =
-		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
+	/* Every Receive WR is signaled. */
+	newxprt->sc_rq_cq = ib_alloc_cq_any(dev, newxprt, rq_depth,
+					    IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq))
 		goto errout;
 
@@ -448,7 +453,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.qp_context = &newxprt->sc_xprt;
 	qp_attr.port_num = newxprt->sc_port_num;
 	qp_attr.cap.max_rdma_ctxs = ctxts;
-	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
+	qp_attr.cap.max_send_wr = sq_depth;
 	qp_attr.cap.max_recv_wr = rq_depth;
 	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
 	qp_attr.cap.max_recv_sge = 1;
@@ -456,17 +461,20 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.qp_type = IB_QPT_RC;
 	qp_attr.send_cq = newxprt->sc_sq_cq;
 	qp_attr.recv_cq = newxprt->sc_rq_cq;
-	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
-		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
-	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
-		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
-
 	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
 	if (ret) {
 		trace_svcrdma_qp_err(newxprt, ret);
 		goto errout;
 	}
+	dprintk("svcrdma: cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
+		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
+	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
+		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
+	dprintk("    send CQ depth = %d, recv CQ depth = %d\n",
+		sq_depth, rq_depth);
 	newxprt->sc_qp = newxprt->sc_cm_id->qp;
+	newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
+	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
 
 	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 		newxprt->sc_snd_w_inv = false;
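
A note on the convention this patch relies on, for readers who have
not used the rdma_rw API: when ib_qp_init_attr::cap.max_rdma_ctxs is
nonzero, ib_create_qp() (via rdma_rw_init_qp()) enlarges
cap.max_send_wr in place to cover the WR chains those rw_ctxs will
post, clamped to the device's max_qp_wr. The attr struct is therefore
in/out, and the caller must size its SQ accounting from the value
written back. A minimal sketch of that contract follows; the verbs
calls and struct fields are the real API, but setup_sq() and its
parameter names are hypothetical scaffolding, not code from this
patch:

	#include <rdma/rdma_cm.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical helper: create an RC QP whose SQ is sized for
	 * sq_depth ULP Send WRs plus the WQEs the core adds for ctxts
	 * rw_ctxs. Returns 0 or a negative errno.
	 */
	static int setup_sq(struct rdma_cm_id *cm_id, struct ib_pd *pd,
			    struct ib_cq *sq_cq, struct ib_cq *rq_cq,
			    unsigned int sq_depth, unsigned int ctxts)
	{
		struct ib_qp_init_attr qp_attr = {
			.cap.max_send_wr   = sq_depth,	/* ULP's own Send WRs */
			.cap.max_rdma_ctxs = ctxts,	/* core adds WQEs for these */
			.cap.max_recv_wr   = sq_depth,
			.cap.max_send_sge  = 1,
			.cap.max_recv_sge  = 1,
			.sq_sig_type	   = IB_SIGNAL_REQ_WR,
			.qp_type	   = IB_QPT_RC,
			.send_cq	   = sq_cq,
			.recv_cq	   = rq_cq,
		};
		int ret;

		ret = rdma_create_qp(cm_id, pd, &qp_attr);
		if (ret)
			return ret;

		/* rdma_rw_init_qp() has grown cap.max_send_wr; any SQ
		 * accounting must use this written-back value, not the
		 * value requested above.
		 */
		pr_info("SQ depth granted: %u\n", qp_attr.cap.max_send_wr);
		return 0;
	}

This is exactly the read-back that the hunk ending in
"newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;" performs.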