[PATCH rdma-core 1/2] cxgb4: refactor the flush logic

Currently c4iw_flush_qp() assumes the qp lock is held by the
caller. Change it so that c4iw_flush_qp() acquires all the locks
it needs itself, taking them once in a fixed order: rcq first,
then scq (if different), then qp. This also moves the wq.flushed
test and the QP state update under all three locks, removing the
unlock/relock window the old code had between flushing the
receive and send queues.

Signed-off-by: Raju Rangoju <rajur@xxxxxxxxxxx>
Reviewed-by: Steve Wise <swise@xxxxxxxxxxxxxxxxxxxxx>
---
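For reviewers, here is a minimal standalone sketch of the fixed
lock-ordering (deadlock-avoidance) pattern the patch adopts. The
struct and function names below are hypothetical stand-ins, not the
provider's actual c4iw_qp/c4iw_cq types:

#include <pthread.h>

/* Hypothetical stand-ins for the provider's c4iw_cq / c4iw_qp. */
struct sketch_cq {
	pthread_spinlock_t lock;
};

struct sketch_qp {
	pthread_spinlock_t lock;
	struct sketch_cq *rcq;	/* recv CQ */
	struct sketch_cq *scq;	/* send CQ; may be the same CQ as rcq */
};

/*
 * Take rcq, then scq (only if it is a distinct CQ), then qp, and
 * release in the reverse order. Every path that needs more than one
 * of these locks must follow the same order, or two threads taking
 * them in opposite orders can deadlock.
 */
static void sketch_flush(struct sketch_qp *qp)
{
	pthread_spin_lock(&qp->rcq->lock);
	if (qp->scq != qp->rcq)
		pthread_spin_lock(&qp->scq->lock);
	pthread_spin_lock(&qp->lock);

	/* ... flush work that needs both CQs and the QP held ... */

	pthread_spin_unlock(&qp->lock);
	if (qp->scq != qp->rcq)
		pthread_spin_unlock(&qp->scq->lock);
	pthread_spin_unlock(&qp->rcq->lock);
}
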
 providers/cxgb4/qp.c    | 41 ++++++++++++++++++++++-------------------
 providers/cxgb4/verbs.c | 12 ++++--------
 2 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/providers/cxgb4/qp.c b/providers/cxgb4/qp.c
index af04e3a1..46806341 100644
--- a/providers/cxgb4/qp.c
+++ b/providers/cxgb4/qp.c
@@ -488,44 +488,49 @@ static void update_qp_state(struct c4iw_qp *qhp)
 		qhp->ibv_qp.state = attr.qp_state;
 }
 
-/*
- * Assumes qhp lock is held.
- */
 void c4iw_flush_qp(struct c4iw_qp *qhp)
 {
 	struct c4iw_cq *rchp, *schp;
 	int count;
 
-	if (qhp->wq.flushed)
-		return;
-
-	update_qp_state(qhp);
-
 	rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
 	schp = to_c4iw_cq(qhp->ibv_qp.send_cq);
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
-	qhp->wq.flushed = 1;
-	pthread_spin_unlock(&qhp->lock);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	pthread_spin_lock(&rchp->lock);
+	if (schp != rchp)
+		pthread_spin_lock(&schp->lock);
 	pthread_spin_lock(&qhp->lock);
+
+	if (qhp->wq.flushed) {
+		pthread_spin_unlock(&qhp->lock);
+		if (rchp != schp)
+			pthread_spin_unlock(&schp->lock);
+		pthread_spin_unlock(&rchp->lock);
+		return;
+	}
+
+	qhp->wq.flushed = 1;
+	t4_set_wq_in_error(&qhp->wq);
+
+	update_qp_state(qhp);
+
 	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
-	pthread_spin_unlock(&qhp->lock);
-	pthread_spin_unlock(&rchp->lock);
 
-	/* locking heirarchy: cq lock first, then qp lock. */
-	pthread_spin_lock(&schp->lock);
-	pthread_spin_lock(&qhp->lock);
 	if (schp != rchp)
 		c4iw_flush_hw_cq(schp);
+
 	c4iw_flush_sq(qhp);
+
 	pthread_spin_unlock(&qhp->lock);
-	pthread_spin_unlock(&schp->lock);
-	pthread_spin_lock(&qhp->lock);
+	if (schp != rchp)
+		pthread_spin_unlock(&schp->lock);
+	pthread_spin_unlock(&rchp->lock);
+
 }
 
 void c4iw_flush_qps(struct c4iw_dev *dev)
@@ -537,9 +542,7 @@ void c4iw_flush_qps(struct c4iw_dev *dev)
 		struct c4iw_qp *qhp = dev->qpid2ptr[i];
 		if (qhp) {
 			if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
-				pthread_spin_lock(&qhp->lock);
 				c4iw_flush_qp(qhp);
-				pthread_spin_unlock(&qhp->lock);
 			}
 		}
 	}
diff --git a/providers/cxgb4/verbs.c b/providers/cxgb4/verbs.c
index 3c493697..988b62a7 100644
--- a/providers/cxgb4/verbs.c
+++ b/providers/cxgb4/verbs.c
@@ -584,9 +584,9 @@ int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
 	int ret;
 
 	PDBG("%s enter qp %p new state %d\n", __func__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
 	if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
 		reset_qp(qhp);
@@ -601,9 +601,7 @@ int c4iw_destroy_qp(struct ibv_qp *ibqp)
 	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);
 
 	PDBG("%s enter qp %p\n", __func__, ibqp);
-	pthread_spin_lock(&qhp->lock);
 	c4iw_flush_qp(qhp);
-	pthread_spin_unlock(&qhp->lock);
 
 	ret = ibv_cmd_destroy_qp(ibqp);
 	if (ret) {
@@ -635,9 +633,9 @@ int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -659,9 +657,9 @@ int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -673,9 +671,9 @@ int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -694,9 +692,7 @@ void c4iw_async_event(struct ibv_async_event *event)
 	case IBV_EVENT_QP_ACCESS_ERR:
 	case IBV_EVENT_PATH_MIG_ERR: {
 		struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
-		pthread_spin_lock(&qhp->lock);
 		c4iw_flush_qp(qhp);
-		pthread_spin_unlock(&qhp->lock);
 		break;
 	}
 	case IBV_EVENT_SQ_DRAINED:
-- 
2.12.0
