Signed-off-by: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>
Cc: Steve Wise <swise@xxxxxxxxxxxxxxxxxxxxx>
Cc: Chuck Lever <chuck.lever@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Max Gurtovoy <maxg@xxxxxxxxxxxx>
---
 drivers/infiniband/core/cq.c    |  4 ++--
 drivers/infiniband/core/verbs.c | 35 +++++++++++++++--------------------
 2 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a754fc727de5..48242785b323 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -58,8 +58,8 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
  * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
  * context and does not ask for completion interrupts from the HCA.
  *
- * Note: for compatibility reasons -1 can be passed in %budget for unlimited
- * polling. Do not use this feature in new code, it will be removed soon.
+ * Note: do not pass -1 as %budget unless it is guaranteed that the number
+ * of completions that will be processed is small.
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 71580cc28c9e..42f8927b542c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1949,17 +1949,12 @@ static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
  */
 static void __ib_drain_sq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->send_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe sdrain;
 	struct ib_send_wr swr = {}, *bad_swr;
 	int ret;
 
-	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
 	swr.wr_cqe = &sdrain.cqe;
 	sdrain.cqe.done = ib_drain_qp_done;
 	init_completion(&sdrain.done);
@@ -1976,7 +1971,11 @@ static void __ib_drain_sq(struct ib_qp *qp)
 		return;
 	}
 
-	wait_for_completion(&sdrain.done);
+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
+		wait_for_completion(&sdrain.done);
 }
 
 /*
@@ -1984,17 +1983,12 @@ static void __ib_drain_sq(struct ib_qp *qp)
  */
 static void __ib_drain_rq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->recv_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe rdrain;
 	struct ib_recv_wr rwr = {}, *bad_rwr;
 	int ret;
 
-	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
 	rwr.wr_cqe = &rdrain.cqe;
 	rdrain.cqe.done = ib_drain_qp_done;
 	init_completion(&rdrain.done);
@@ -2011,7 +2005,11 @@ static void __ib_drain_rq(struct ib_qp *qp)
 		return;
 	}
 
-	wait_for_completion(&rdrain.done);
+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
+		wait_for_completion(&rdrain.done);
 }
 
 /**
@@ -2028,8 +2026,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
  * ensure there is room in the CQ and SQ for the drain work request and
  * completion.
  *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.
@@ -2057,8 +2054,7 @@ EXPORT_SYMBOL(ib_drain_sq);
  * ensure there is room in the CQ and RQ for the drain work request and
  * completion.
  *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.
@@ -2082,8 +2078,7 @@ EXPORT_SYMBOL(ib_drain_rq);
  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
  * and completions.
  *
- * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQs using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.
-- 
2.11.0
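
P.S. For reference, a minimal sketch of what this change enables on the
ULP side. The teardown function below is hypothetical and only
illustrates the intended usage; ib_drain_qp() and ib_destroy_qp() are
the existing verbs API:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical ULP teardown path. Before this patch, ib_drain_qp()
 * triggered a WARN_ONCE() and returned early if the send or receive CQ
 * used IB_POLL_DIRECT. With this patch the drain code polls such a CQ
 * itself via ib_process_cq_direct() until the drain completions arrive,
 * so the call below works for all three CQ poll contexts.
 */
static void example_teardown(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* wait for all posted WRs to complete */
	ib_destroy_qp(qp);
}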