[PATCH for-next v2 4/8] RDMA/rxe: Cleanup error state handling in rxe_comp.c

Clean up the handling of the qp in the error state, in the reset state,
and during rxe_qp_do_cleanup(). Make the code match the equivalent
code in rxe_resp.c.

Signed-off-by: Ian Ziemba <ian.ziemba@xxxxxxx>
Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
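For context, the observable behavior follows the pattern rxe_resp.c
already implements: once the qp leaves the working state, queued
incoming packets are dropped and, if the qp is in the error state,
each remaining wqe is completed with a flush error. A minimal
userspace sketch of what a libibverbs consumer would then see when
polling its cq (illustrative only, not part of this patch;
drain_flushed_completions is a made-up helper name):

#include <stdio.h>
#include <infiniband/verbs.h>

/* Poll a cq dry after its qp has been moved to the error state.
 * Every work request still outstanding at that point completes
 * with IBV_WC_WR_FLUSH_ERR rather than a normal status.
 */
static void drain_flushed_completions(struct ibv_cq *cq)
{
	struct ibv_wc wc;

	while (ibv_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IBV_WC_WR_FLUSH_ERR)
			fprintf(stderr, "wr_id %llu flushed\n",
				(unsigned long long)wc.wr_id);
	}
}

Note that on the kernel side the wqe is always consumed even when
posting a completion fails (notify is cleared), so the queue drains
in every case.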
 drivers/infiniband/sw/rxe/rxe_comp.c | 55 +++++++++++++++++++++++-----
 drivers/infiniband/sw/rxe/rxe_resp.c | 28 +++++++-------
 2 files changed, 60 insertions(+), 23 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index cbfa16b3a490..ebece584a020 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -542,25 +542,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 	return COMPST_GET_WQE;
 }
 
-static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
+/* drain incoming response packet queue */
+static void drain_resp_pkts(struct rxe_qp *qp)
 {
 	struct sk_buff *skb;
-	struct rxe_send_wqe *wqe;
-	struct rxe_queue *q = qp->sq.queue;
 
 	while ((skb = skb_dequeue(&qp->resp_pkts))) {
 		rxe_put(qp);
 		kfree_skb(skb);
 		ib_device_put(qp->ibqp.device);
 	}
+}
+
+/* complete send wqe with flush error */
+static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	struct rxe_cqe cqe = {};
+	struct ib_wc *wc = &cqe.ibwc;
+	struct ib_uverbs_wc *uwc = &cqe.uibwc;
+	int err;
+
+	if (qp->is_user) {
+		uwc->wr_id = wqe->wr.wr_id;
+		uwc->status = wqe->status;
+		uwc->qp_num = qp->ibqp.qp_num;
+	} else {
+		wc->wr_id = wqe->wr.wr_id;
+		wc->status = wqe->status;
+		wc->qp = &qp->ibqp;
+	}
+
+	err = rxe_cq_post(qp->scq, &cqe, 0);
+	if (err)
+		rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+
+	return err;
+}
+
+/* drain and optionally complete the send queue
+ * if unable to complete a wqe stop completing
+ * and flush the remaining wqes
+ */
+static void flush_send_queue(struct rxe_qp *qp, bool notify)
+{
+	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
+	int err;
 
 	while ((wqe = queue_head(q, q->type))) {
 		if (notify) {
-			wqe->status = IB_WC_WR_FLUSH_ERR;
-			do_complete(qp, wqe);
-		} else {
-			queue_advance_consumer(q, q->type);
+			err = flush_send_wqe(qp, wqe);
+			if (err)
+				notify = 0;
 		}
+		queue_advance_consumer(q, q->type);
 	}
 }
 
@@ -589,8 +624,10 @@ int rxe_completer(struct rxe_qp *qp)
 
 	if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
 	    qp->comp.state == QP_STATE_RESET) {
-		rxe_drain_resp_pkts(qp, qp->valid &&
-				    qp->comp.state == QP_STATE_ERROR);
+		bool notify = qp->valid &&
+				(qp->comp.state == QP_STATE_ERROR);
+		drain_resp_pkts(qp);
+		flush_send_queue(qp, notify);
 		goto exit;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8f9bbb14fa7a..2f71183449f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1396,7 +1396,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
 }
 
 /* drain incoming request packet queue */
-static void rxe_drain_req_pkts(struct rxe_qp *qp)
+static void drain_req_pkts(struct rxe_qp *qp)
 {
 	struct sk_buff *skb;
 
@@ -1408,33 +1408,35 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp)
 }
 
 /* complete receive wqe with flush error */
-static int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
+static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
 {
 	struct rxe_cqe cqe = {};
 	struct ib_wc *wc = &cqe.ibwc;
 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
+	int err;
 
 	if (qp->rcq->is_user) {
+		uwc->wr_id = wqe->wr_id;
 		uwc->status = IB_WC_WR_FLUSH_ERR;
 		uwc->qp_num = qp_num(qp);
-		uwc->wr_id = wqe->wr_id;
 	} else {
+		wc->wr_id = wqe->wr_id;
 		wc->status = IB_WC_WR_FLUSH_ERR;
 		wc->qp = &qp->ibqp;
-		wc->wr_id = wqe->wr_id;
 	}
 
-	if (rxe_cq_post(qp->rcq, &cqe, 0))
-		return -ENOMEM;
+	err = rxe_cq_post(qp->rcq, &cqe, 0);
+	if (err)
+		rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
 
-	return 0;
+	return err;
 }
 
 /* drain and optionally complete the receive queue
  * if unable to complete a wqe stop completing and
  * just flush the remaining wqes
  */
-static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
+static void flush_recv_queue(struct rxe_qp *qp, bool notify)
 {
 	struct rxe_queue *q = qp->rq.queue;
 	struct rxe_recv_wqe *wqe;
@@ -1445,11 +1447,9 @@ static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
 
 	while ((wqe = queue_head(q, q->type))) {
 		if (notify) {
-			err = complete_flush(qp, wqe);
-			if (err) {
-				rxe_dbg_qp(qp, "complete failed for recv wqe");
+			err = flush_recv_wqe(qp, wqe);
+			if (err)
 				notify = 0;
-			}
 		}
 		queue_advance_consumer(q, q->type);
 	}
@@ -1471,8 +1471,8 @@ int rxe_responder(struct rxe_qp *qp)
 	    qp->resp.state == QP_STATE_RESET) {
 		bool notify = qp->valid &&
 				(qp->resp.state == QP_STATE_ERROR);
-		rxe_drain_req_pkts(qp);
-		rxe_drain_recv_queue(qp, notify);
+		drain_req_pkts(qp);
+		flush_recv_queue(qp, notify);
 		goto exit;
 	}
 
-- 
2.37.2