[rdma-core 3/4] bnxt_re/lib: consolidate hwque and swque in common structure

Consolidate the hardware queue (hwque) and software queue (swque)
under a single bookkeeping data structure, bnxt_re_joint_queue.

This eases hardware and software queue management and further
reduces the size of the bnxt_re_qp structure.

Signed-off-by: Devesh Sharma <devesh.sharma@xxxxxxxxxxxx>
---
 providers/bnxt_re/db.c    |   6 +-
 providers/bnxt_re/main.h  |  13 ++--
 providers/bnxt_re/verbs.c | 133 +++++++++++++++++++++-----------------
 3 files changed, 87 insertions(+), 65 deletions(-)
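
Note for reviewers (not part of the applied patch): the shape of the
consolidation is sketched below. bnxt_re_queue and bnxt_re_wrid are
reduced to minimal stand-ins here; their real definitions remain in
providers/bnxt_re/main.h, and the field names follow the hunks below.

/* Illustrative sketch only -- minimal stand-in types showing the
 * before/after field access. */
#include <stdint.h>

struct bnxt_re_queue { uint32_t head, tail; };	/* stand-in */
struct bnxt_re_wrid  { uint64_t wrid; };	/* stand-in */

struct bnxt_re_joint_queue {
	struct bnxt_re_queue *hwque;	/* hardware queue ring */
	struct bnxt_re_wrid  *swque;	/* per-slot software bookkeeping */
	uint32_t start_idx;
	uint32_t last_idx;
};

/*
 * Old access:  qp->sqq->tail           and  qp->swrid[idx].wrid
 * New access:  qp->jsqq->hwque->tail   and  qp->jsqq->swque[idx].wrid
 * (likewise qp->rqq/qp->rwrid become qp->jrqq->hwque/qp->jrqq->swque)
 */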

diff --git a/providers/bnxt_re/db.c b/providers/bnxt_re/db.c
index 85da182e..3c797573 100644
--- a/providers/bnxt_re/db.c
+++ b/providers/bnxt_re/db.c
@@ -63,7 +63,8 @@ void bnxt_re_ring_rq_db(struct bnxt_re_qp *qp)
 {
 	struct bnxt_re_db_hdr hdr;
 
-	bnxt_re_init_db_hdr(&hdr, qp->rqq->tail, qp->qpid, BNXT_RE_QUE_TYPE_RQ);
+	bnxt_re_init_db_hdr(&hdr, qp->jrqq->hwque->tail,
+			    qp->qpid, BNXT_RE_QUE_TYPE_RQ);
 	bnxt_re_ring_db(qp->udpi, &hdr);
 }
 
@@ -71,7 +72,8 @@ void bnxt_re_ring_sq_db(struct bnxt_re_qp *qp)
 {
 	struct bnxt_re_db_hdr hdr;
 
-	bnxt_re_init_db_hdr(&hdr, qp->sqq->tail, qp->qpid, BNXT_RE_QUE_TYPE_SQ);
+	bnxt_re_init_db_hdr(&hdr, qp->jsqq->hwque->tail,
+			    qp->qpid, BNXT_RE_QUE_TYPE_SQ);
 	bnxt_re_ring_db(qp->udpi, &hdr);
 }
 
diff --git a/providers/bnxt_re/main.h b/providers/bnxt_re/main.h
index 368297e6..d470e30a 100644
--- a/providers/bnxt_re/main.h
+++ b/providers/bnxt_re/main.h
@@ -120,13 +120,18 @@ struct bnxt_re_srq {
 	bool arm_req;
 };
 
+struct bnxt_re_joint_queue {
+	struct bnxt_re_queue *hwque;
+	struct bnxt_re_wrid *swque;
+	uint32_t start_idx;
+	uint32_t last_idx;
+};
+
 struct bnxt_re_qp {
 	struct ibv_qp ibvqp;
 	struct bnxt_re_chip_ctx *cctx;
-	struct bnxt_re_queue *sqq;
-	struct bnxt_re_wrid *swrid;
-	struct bnxt_re_queue *rqq;
-	struct bnxt_re_wrid *rwrid;
+	struct bnxt_re_joint_queue *jsqq;
+	struct bnxt_re_joint_queue *jrqq;
 	struct bnxt_re_srq *srq;
 	struct bnxt_re_cq *scq;
 	struct bnxt_re_cq *rcq;
diff --git a/providers/bnxt_re/verbs.c b/providers/bnxt_re/verbs.c
index 760e840a..4344b3dd 100644
--- a/providers/bnxt_re/verbs.c
+++ b/providers/bnxt_re/verbs.c
@@ -242,7 +242,7 @@ static uint8_t bnxt_re_poll_err_scqe(struct bnxt_re_qp *qp,
 				     struct bnxt_re_bcqe *hdr,
 				     struct bnxt_re_req_cqe *scqe, int *cnt)
 {
-	struct bnxt_re_queue *sq = qp->sqq;
+	struct bnxt_re_queue *sq = qp->jsqq->hwque;
 	struct bnxt_re_context *cntx;
 	struct bnxt_re_wrid *swrid;
 	struct bnxt_re_psns *spsn;
@@ -252,7 +252,7 @@ static uint8_t bnxt_re_poll_err_scqe(struct bnxt_re_qp *qp,
 
 	scq = to_bnxt_re_cq(qp->ibvqp.send_cq);
 	cntx = to_bnxt_re_context(scq->ibvcq.context);
-	swrid = &qp->swrid[head];
+	swrid = &qp->jsqq->swque[head];
 	spsn = swrid->psns;
 
 	*cnt = 1;
@@ -267,7 +267,7 @@ static uint8_t bnxt_re_poll_err_scqe(struct bnxt_re_qp *qp,
 			BNXT_RE_PSNS_OPCD_MASK;
 	ibvwc->byte_len = 0;
 
-	bnxt_re_incr_head(qp->sqq);
+	bnxt_re_incr_head(sq);
 
 	if (qp->qpst != IBV_QPS_ERR)
 		qp->qpst = IBV_QPS_ERR;
@@ -284,14 +284,14 @@ static uint8_t bnxt_re_poll_success_scqe(struct bnxt_re_qp *qp,
 					 struct bnxt_re_req_cqe *scqe,
 					 int *cnt)
 {
-	struct bnxt_re_queue *sq = qp->sqq;
+	struct bnxt_re_queue *sq = qp->jsqq->hwque;
 	struct bnxt_re_wrid *swrid;
 	struct bnxt_re_psns *spsn;
-	uint8_t pcqe = false;
 	uint32_t head = sq->head;
+	uint8_t pcqe = false;
 	uint32_t cindx;
 
-	swrid = &qp->swrid[head];
+	swrid = &qp->jsqq->swque[head];
 	spsn = swrid->psns;
 	cindx = le32toh(scqe->con_indx);
 
@@ -361,8 +361,8 @@ static int bnxt_re_poll_err_rcqe(struct bnxt_re_qp *qp, struct ibv_wc *ibvwc,
 	cntx = to_bnxt_re_context(rcq->ibvcq.context);
 
 	if (!qp->srq) {
-		rq = qp->rqq;
-		ibvwc->wr_id = qp->rwrid[rq->head].wrid;
+		rq = qp->jrqq->hwque;
+		ibvwc->wr_id = qp->jrqq->swque[rq->head].wrid;
 	} else {
 		struct bnxt_re_srq *srq;
 		int tag;
@@ -423,8 +423,8 @@ static void bnxt_re_poll_success_rcqe(struct bnxt_re_qp *qp,
 
 	rcqe = cqe;
 	if (!qp->srq) {
-		rq = qp->rqq;
-		ibvwc->wr_id = qp->rwrid[rq->head].wrid;
+		rq = qp->jrqq->hwque;
+		ibvwc->wr_id = qp->jrqq->swque[rq->head].wrid;
 	} else {
 		struct bnxt_re_srq *srq;
 		int tag;
@@ -648,13 +648,13 @@ static int bnxt_re_poll_flush_wqes(struct bnxt_re_cq *cq,
 			if (sq_list) {
 				qp = container_of(cur, struct bnxt_re_qp,
 						  snode);
-				que = qp->sqq;
-				wridp = qp->swrid;
+				que = qp->jsqq->hwque;
+				wridp = qp->jsqq->swque;
 			} else {
 				qp = container_of(cur, struct bnxt_re_qp,
 						  rnode);
-				que = qp->rqq;
-				wridp = qp->rwrid;
+				que = qp->jrqq->hwque;
+				wridp = qp->jrqq->swque;
 			}
 			if (bnxt_re_is_que_empty(que))
 				continue;
@@ -802,55 +802,68 @@ static int bnxt_re_check_qp_limits(struct bnxt_re_context *cntx,
 
 static void bnxt_re_free_queue_ptr(struct bnxt_re_qp *qp)
 {
-	if (qp->rqq)
-		free(qp->rqq);
-	if (qp->sqq)
-		free(qp->sqq);
+	free(qp->jrqq->hwque);
+	free(qp->jrqq);
+	free(qp->jsqq->hwque);
+	free(qp->jsqq);
 }
 
 static int bnxt_re_alloc_queue_ptr(struct bnxt_re_qp *qp,
 				   struct ibv_qp_init_attr *attr)
 {
-	qp->sqq = calloc(1, sizeof(struct bnxt_re_queue));
-	if (!qp->sqq)
-		return -ENOMEM;
+	int rc = -ENOMEM;
+
+	qp->jsqq = calloc(1, sizeof(struct bnxt_re_joint_queue));
+	if (!qp->jsqq)
+		return rc;
+	qp->jsqq->hwque = calloc(1, sizeof(struct bnxt_re_queue));
+	if (!qp->jsqq->hwque)
+		goto fail;
+
 	if (!attr->srq) {
-		qp->rqq = calloc(1, sizeof(struct bnxt_re_queue));
-		if (!qp->rqq) {
-			free(qp->sqq);
-			return -ENOMEM;
+		qp->jrqq = calloc(1, sizeof(struct bnxt_re_joint_queue));
+		if (!qp->jrqq) {
+			free(qp->jsqq);
+			goto fail;
 		}
+		qp->jrqq->hwque = calloc(1, sizeof(struct bnxt_re_queue));
+		if (!qp->jrqq->hwque)
+			goto fail;
 	}
 
 	return 0;
+fail:
+	bnxt_re_free_queue_ptr(qp);
+	return rc;
 }
 
 static void bnxt_re_free_queues(struct bnxt_re_qp *qp)
 {
-	if (qp->rqq) {
-		if (qp->rwrid)
-			free(qp->rwrid);
-		pthread_spin_destroy(&qp->rqq->qlock);
-		bnxt_re_free_aligned(qp->rqq);
+	if (qp->jrqq) {
+		if (qp->jrqq->swque)
+			free(qp->jrqq->swque);
+		pthread_spin_destroy(&qp->jrqq->hwque->qlock);
+		bnxt_re_free_aligned(qp->jrqq->hwque);
 	}
 
-	if (qp->swrid)
-		free(qp->swrid);
-	pthread_spin_destroy(&qp->sqq->qlock);
-	bnxt_re_free_aligned(qp->sqq);
+	if (qp->jsqq->swque)
+		free(qp->jsqq->swque);
+	pthread_spin_destroy(&qp->jsqq->hwque->qlock);
+	bnxt_re_free_aligned(qp->jsqq->hwque);
 }
 
 static int bnxt_re_alloc_queues(struct bnxt_re_qp *qp,
 				struct ibv_qp_init_attr *attr,
 				uint32_t pg_size) {
 	struct bnxt_re_psns_ext *psns_ext;
+	struct bnxt_re_wrid *swque;
 	struct bnxt_re_queue *que;
 	struct bnxt_re_psns *psns;
 	uint32_t psn_depth;
 	uint32_t psn_size;
 	int ret, indx;
 
-	que = qp->sqq;
+	que = qp->jsqq->hwque;
 	que->stride = bnxt_re_get_sqe_sz();
 	/* 8916 adjustment */
 	que->depth = roundup_pow_of_two(attr->cap.max_send_wr + 1 +
@@ -870,7 +883,7 @@ static int bnxt_re_alloc_queues(struct bnxt_re_qp *qp,
 	 * is UD-qp. UD-qp use this memory to maintain WC-opcode.
 	 * See definition of bnxt_re_fill_psns() for the use case.
 	 */
-	ret = bnxt_re_alloc_aligned(qp->sqq, pg_size);
+	ret = bnxt_re_alloc_aligned(que, pg_size);
 	if (ret)
 		return ret;
 	/* exclude psns depth*/
@@ -878,36 +891,38 @@ static int bnxt_re_alloc_queues(struct bnxt_re_qp *qp,
 	/* start of spsn space sizeof(struct bnxt_re_psns) each. */
 	psns = (que->va + que->stride * que->depth);
 	psns_ext = (struct bnxt_re_psns_ext *)psns;
-	pthread_spin_init(&que->qlock, PTHREAD_PROCESS_PRIVATE);
-	qp->swrid = calloc(que->depth, sizeof(struct bnxt_re_wrid));
-	if (!qp->swrid) {
+	swque = calloc(que->depth, sizeof(struct bnxt_re_wrid));
+	if (!swque) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	for (indx = 0 ; indx < que->depth; indx++, psns++)
-		qp->swrid[indx].psns = psns;
+		swque[indx].psns = psns;
 	if (bnxt_re_is_chip_gen_p5(qp->cctx)) {
 		for (indx = 0 ; indx < que->depth; indx++, psns_ext++) {
-			qp->swrid[indx].psns_ext = psns_ext;
-			qp->swrid[indx].psns = (struct bnxt_re_psns *)psns_ext;
+			swque[indx].psns_ext = psns_ext;
+			swque[indx].psns = (struct bnxt_re_psns *)psns_ext;
 		}
 	}
+	qp->jsqq->swque = swque;
 
 	qp->cap.max_swr = que->depth;
+	pthread_spin_init(&que->qlock, PTHREAD_PROCESS_PRIVATE);
 
-	if (qp->rqq) {
-		que = qp->rqq;
+	if (qp->jrqq) {
+		que = qp->jrqq->hwque;
 		que->stride = bnxt_re_get_rqe_sz();
 		que->depth = roundup_pow_of_two(attr->cap.max_recv_wr + 1);
 		que->diff = que->depth - attr->cap.max_recv_wr;
-		ret = bnxt_re_alloc_aligned(qp->rqq, pg_size);
+		ret = bnxt_re_alloc_aligned(que, pg_size);
 		if (ret)
 			goto fail;
 		pthread_spin_init(&que->qlock, PTHREAD_PROCESS_PRIVATE);
 		/* For RQ only bnxt_re_wri.wrid is used. */
-		qp->rwrid = calloc(que->depth, sizeof(struct bnxt_re_wrid));
-		if (!qp->rwrid) {
+		qp->jrqq->swque = calloc(que->depth,
+					 sizeof(struct bnxt_re_wrid));
+		if (!qp->jrqq->swque) {
 			ret = -ENOMEM;
 			goto fail;
 		}
@@ -946,8 +961,8 @@ struct ibv_qp *bnxt_re_create_qp(struct ibv_pd *ibvpd,
 		goto failq;
 	/* Fill ibv_cmd */
 	cap = &qp->cap;
-	req.qpsva = (uintptr_t)qp->sqq->va;
-	req.qprva = qp->rqq ? (uintptr_t)qp->rqq->va : 0;
+	req.qpsva = (uintptr_t)qp->jsqq->hwque->va;
+	req.qprva = qp->jrqq ? (uintptr_t)qp->jrqq->hwque->va : 0;
 	req.qp_handle = (uintptr_t)qp;
 
 	if (ibv_cmd_create_qp(ibvpd, &qp->ibvqp, attr, &req.ibv_cmd, sizeof(req),
@@ -995,11 +1010,11 @@ int bnxt_re_modify_qp(struct ibv_qp *ibvqp, struct ibv_qp_attr *attr,
 			qp->qpst = attr->qp_state;
 			/* transition to reset */
 			if (qp->qpst == IBV_QPS_RESET) {
-				qp->sqq->head = 0;
-				qp->sqq->tail = 0;
-				if (qp->rqq) {
-					qp->rqq->head = 0;
-					qp->rqq->tail = 0;
+				qp->jsqq->hwque->head = 0;
+				qp->jsqq->hwque->tail = 0;
+				if (qp->jrqq) {
+					qp->jrqq->hwque->head = 0;
+					qp->jrqq->hwque->tail = 0;
 				}
 			}
 		}
@@ -1257,7 +1272,7 @@ int bnxt_re_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 		      struct ibv_send_wr **bad)
 {
 	struct bnxt_re_qp *qp = to_bnxt_re_qp(ibvqp);
-	struct bnxt_re_queue *sq = qp->sqq;
+	struct bnxt_re_queue *sq = qp->jsqq->hwque;
 	struct bnxt_re_wrid *wrid;
 	uint8_t is_inline = false;
 	struct bnxt_re_bsqe *hdr;
@@ -1289,7 +1304,7 @@ int bnxt_re_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 		}
 
 		sqe = (void *)(sq->va + (sq->tail * sq->stride));
-		wrid = &qp->swrid[sq->tail];
+		wrid = &qp->jsqq->swque[sq->tail];
 
 		memset(sqe, 0, bnxt_re_get_sqe_sz());
 		hdr = sqe;
@@ -1376,7 +1391,7 @@ static int bnxt_re_build_rqe(struct bnxt_re_qp *qp, struct ibv_recv_wr *wr,
 	uint32_t hdrval;
 
 	sge = (rqe + bnxt_re_get_rqe_hdr_sz());
-	wrid = &qp->rwrid[qp->rqq->tail];
+	wrid = &qp->jrqq->swque[qp->jrqq->hwque->tail];
 
 	len = bnxt_re_build_sge(sge, wr->sg_list, wr->num_sge, false);
 	wqe_sz = wr->num_sge + (bnxt_re_get_rqe_hdr_sz() >> 4); /* 16B align */
@@ -1388,7 +1403,7 @@ static int bnxt_re_build_rqe(struct bnxt_re_qp *qp, struct ibv_recv_wr *wr,
 	hdrval = BNXT_RE_WR_OPCD_RECV;
 	hdrval |= ((wqe_sz & BNXT_RE_HDR_WS_MASK) << BNXT_RE_HDR_WS_SHIFT);
 	hdr->rsv_ws_fl_wt = htole32(hdrval);
-	hdr->wrid = htole32(qp->rqq->tail);
+	hdr->wrid = htole32(qp->jrqq->hwque->tail);
 
 	/* Fill wrid */
 	wrid->wrid = wr->wr_id;
@@ -1402,7 +1417,7 @@ int bnxt_re_post_recv(struct ibv_qp *ibvqp, struct ibv_recv_wr *wr,
 		      struct ibv_recv_wr **bad)
 {
 	struct bnxt_re_qp *qp = to_bnxt_re_qp(ibvqp);
-	struct bnxt_re_queue *rq = qp->rqq;
+	struct bnxt_re_queue *rq = qp->jrqq->hwque;
 	void *rqe;
 	int ret;
 
-- 
2.25.1
