From: Bob Pearson <rpearsonhpe@xxxxxxxxx>

Currently the requester can continue to process send wqes after a local
qp operation error is detected because the setting of the qp state to
the error state is deferred until later. This patch splits the qp state
for the completer and requester into two separate states and sets
qp->req.state = QP_STATE_ERROR as soon as the error is detected, before
another wqe can be executed.

Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
V4: new patch
---
 drivers/infiniband/sw/rxe/rxe_comp.c  | 6 +++---
 drivers/infiniband/sw/rxe/rxe_qp.c    | 5 +++++
 drivers/infiniband/sw/rxe/rxe_req.c   | 1 +
 drivers/infiniband/sw/rxe/rxe_verbs.h | 1 +
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index da3a398053b8..0b68630a3e49 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -565,10 +565,10 @@ int rxe_completer(void *arg)
 	if (!rxe_get(qp))
 		return -EAGAIN;
 
-	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
-	    qp->req.state == QP_STATE_RESET) {
+	if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
+	    qp->comp.state == QP_STATE_RESET) {
 		rxe_drain_resp_pkts(qp, qp->valid &&
-				    qp->req.state == QP_STATE_ERROR);
+				    qp->comp.state == QP_STATE_ERROR);
 		ret = -EAGAIN;
 		goto done;
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22e9b85344c3..a95d3b49ae20 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -230,6 +230,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 					       QUEUE_TYPE_FROM_CLIENT);
 
 	qp->req.state		= QP_STATE_RESET;
+	qp->comp.state		= QP_STATE_RESET;
 	qp->req.opcode		= -1;
 	qp->comp.opcode		= -1;
 
@@ -490,6 +491,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
 	/* move qp to the reset state */
 	qp->req.state = QP_STATE_RESET;
+	qp->comp.state = QP_STATE_RESET;
 	qp->resp.state = QP_STATE_RESET;
 
 	/* let state machines reset themselves drain work and packet queues
@@ -552,6 +554,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
 	qp->resp.state = QP_STATE_ERROR;
+	qp->comp.state = QP_STATE_ERROR;
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
@@ -689,6 +692,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		pr_debug("qp#%d state -> INIT\n", qp_num(qp));
 		qp->req.state = QP_STATE_INIT;
 		qp->resp.state = QP_STATE_INIT;
+		qp->comp.state = QP_STATE_INIT;
 		break;
 
 	case IB_QPS_RTR:
@@ -699,6 +703,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	case IB_QPS_RTS:
 		pr_debug("qp#%d state -> RTS\n", qp_num(qp));
 		qp->req.state = QP_STATE_READY;
+		qp->comp.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_SQD:
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 6d2742997e1b..ad25290e393d 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -773,6 +773,7 @@ int rxe_requester(void *arg)
 	/* update wqe_index for each wqe completion */
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
+	qp->req.state = QP_STATE_ERROR;
 	__rxe_do_task(&qp->comp.task);
 
 exit:
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac464e68c923..bbfffe243fd6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -129,6 +129,7 @@ struct rxe_req_info {
 };
 
 struct rxe_comp_info {
+	enum rxe_qp_state	state;
 	u32			psn;
 	int			opcode;
 	int			timeout;
-- 
2.31.1
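
[Editor's note: the following is a simplified, stand-alone sketch of the ordering this patch establishes, not driver code. The struct, function names (qp_model, requester_step, completer_step), and printf reporting are illustrative assumptions; only the idea of separate req/comp states and setting the requester's state to error before another wqe can run is taken from the patch.]

/*
 * Toy model: once the requester observes a local error it marks its own
 * state as ERROR immediately, so no further "wqe" is executed, while the
 * completer keeps running on its own (split-out) state to flush.
 */
#include <stdio.h>
#include <stdbool.h>

enum qp_state { QP_STATE_READY, QP_STATE_ERROR };

struct qp_model {
	enum qp_state req_state;	/* gates the requester */
	enum qp_state comp_state;	/* gates the completer (split out) */
};

/* requester: stop issuing wqes the moment a local error is seen */
static void requester_step(struct qp_model *qp, bool local_error)
{
	if (qp->req_state == QP_STATE_ERROR) {
		printf("requester: error state, no wqe executed\n");
		return;
	}

	if (local_error) {
		/* set the requester state before anything else can run */
		qp->req_state = QP_STATE_ERROR;
		printf("requester: local error, req state -> ERROR\n");
		return;
	}

	printf("requester: wqe executed\n");
}

/* completer: has its own state, so it can still flush after the error */
static void completer_step(struct qp_model *qp)
{
	if (qp->comp_state == QP_STATE_ERROR) {
		printf("completer: flushing in error state\n");
		return;
	}
	printf("completer: normal completion\n");
}

int main(void)
{
	struct qp_model qp = { QP_STATE_READY, QP_STATE_READY };

	requester_step(&qp, false);	/* normal wqe */
	requester_step(&qp, true);	/* local error: req state set at once */
	requester_step(&qp, false);	/* blocked: no further wqe runs */
	completer_step(&qp);		/* completer still driven by its own state */
	return 0;
}

[End of editor's sketch: in the real driver the equivalent of the early state change is the added "qp->req.state = QP_STATE_ERROR;" in rxe_requester(), and the completer's independent gating comes from the new qp->comp.state field checked in rxe_completer().]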