Extend create_srq to support basic and XRC SRQs. Drop srq->pd in favor
of the PD referenced by rdma-core.

Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |  5 +-
 drivers/infiniband/sw/rxe/rxe_srq.c   | 71 ++++++++++++++-------------
 drivers/infiniband/sw/rxe/rxe_verbs.c | 10 ++--
 drivers/infiniband/sw/rxe/rxe_verbs.h | 22 ++++++++-
 4 files changed, 66 insertions(+), 42 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index b4d45c592bd7..eac56e0c64ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -161,15 +161,12 @@ void retransmit_timer(struct timer_list *t);
 void rnr_nak_timer(struct timer_list *t);
 
 /* rxe_srq.c */
-#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
-
+int rxe_srq_chk_init_attr(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
-
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp);
-
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index a9e7817e2732..edbfda0cc242 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -9,6 +9,32 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
+int rxe_srq_chk_init_attr(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
+{
+	switch (init->srq_type) {
+	case IB_SRQT_BASIC:
+	case IB_SRQT_XRC:
+		break;
+	case IB_SRQT_TM:
+		pr_warn("Tag matching SRQ not supported\n");
+		return -EOPNOTSUPP;
+	default:
+		pr_warn("Unexpected SRQ type (%d)\n", init->srq_type);
+		return -EINVAL;
+	}
+
+	if (init->attr.max_sge > rxe->attr.max_srq_sge) {
+		pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+			init->attr.max_sge, rxe->attr.max_srq_sge);
+		return -EINVAL;
+	}
+
+	if (init->attr.max_sge < RXE_MIN_SRQ_SGE)
+		init->attr.max_sge = RXE_MIN_SRQ_SGE;
+
+	return rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_MAX_WR);
+}
+
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
 {
@@ -48,23 +74,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 
 		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
 			pr_warn("srq_limit (%d) > cur limit(%d)\n",
-					attr->srq_limit,
-					srq->rq.queue->buf->index_mask);
+				attr->srq_limit,
+				srq->rq.queue->buf->index_mask);
 			goto err1;
 		}
 	}
 
-	if (mask == IB_SRQ_INIT_MASK) {
-		if (attr->max_sge > rxe->attr.max_srq_sge) {
-			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
-				attr->max_sge, rxe->attr.max_srq_sge);
-			goto err1;
-		}
-
-		if (attr->max_sge < RXE_MIN_SRQ_SGE)
-			attr->max_sge = RXE_MIN_SRQ_SGE;
-	}
-
 	return 0;
 
 err1:
@@ -78,24 +93,22 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	int err;
 	int srq_wqe_size;
 	struct rxe_queue *q;
-	enum queue_type type;
 
-	srq->ibsrq.event_handler = init->event_handler;
-	srq->ibsrq.srq_context = init->srq_context;
-	srq->limit = init->attr.srq_limit;
-	srq->srq_num = srq->pelem.index;
-	srq->rq.max_wr = init->attr.max_wr;
-	srq->rq.max_sge = init->attr.max_sge;
-	srq->rq.is_user = srq->is_user;
+	srq->limit = init->attr.srq_limit;
+	srq->rq.max_wr = init->attr.max_wr;
+	srq->rq.max_sge = init->attr.max_sge;
+	srq->rq.is_user = srq->is_user;
 
-	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+	if (init->srq_type == IB_SRQT_XRC)
+		srq->ibsrq.ext.xrc.srq_num = srq->pelem.index;
+
+	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
 
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
-	type = QUEUE_TYPE_FROM_CLIENT;
-	q = rxe_queue_init(rxe, &srq->rq.max_wr,
-			srq_wqe_size, type);
+	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
 	if (!q) {
 		pr_warn("unable to allocate queue for srq\n");
 		return -ENOMEM;
@@ -111,14 +124,6 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		return err;
 	}
 
-	if (uresp) {
-		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
-				 sizeof(uresp->srq_num))) {
-			rxe_queue_cleanup(q);
-			return -EFAULT;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index b4b993f1ce92..fbd1e2d70682 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -307,9 +307,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_create_srq_resp __user *uresp = NULL;
 
-	if (init->srq_type != IB_SRQT_BASIC)
-		return -EOPNOTSUPP;
-
 	if (udata) {
 		if (udata->outlen < sizeof(*uresp))
 			return -EINVAL;
@@ -319,7 +316,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 		srq->is_user = false;
 	}
 
-	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+	err = rxe_srq_chk_init_attr(rxe, init);
 	if (err)
 		goto err_out;
 
@@ -327,6 +324,8 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	if (err)
 		goto err_out;
 
+	rxe_add_index(srq);
+
 	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
 	if (err)
 		goto err_drop_srq_ref;
@@ -334,6 +333,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	return 0;
 
 err_drop_srq_ref:
+	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
 err_out:
 	return err;
@@ -391,7 +391,9 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 	if (srq->rq.queue)
 		rxe_queue_cleanup(srq->rq.queue);
 
+	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5b75de74a992..52599f398ddd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -104,7 +104,6 @@ struct rxe_srq {
 	struct ib_srq		ibsrq;
 	struct rxe_pool_entry	pelem;
 	struct rxe_rq		rq;
-	u32			srq_num;
 
 	bool			is_user;
 	int			limit;
@@ -542,11 +541,32 @@ static inline enum ib_qp_type rxe_qp_type(struct rxe_qp *qp)
 	return qp->ibqp.qp_type;
 }
 
+/* SRQ extractors */
+static inline struct rxe_cq *rxe_srq_cq(struct rxe_srq *srq)
+{
+	return to_rcq(srq->ibsrq.ext.cq);
+}
+
+static inline int rxe_srq_num(struct rxe_srq *srq)
+{
+	return srq->ibsrq.ext.xrc.srq_num;
+}
+
 static inline struct rxe_pd *rxe_srq_pd(struct rxe_srq *srq)
 {
 	return to_rpd(srq->ibsrq.pd);
 }
 
+static inline enum ib_srq_type rxe_srq_type(struct rxe_srq *srq)
+{
+	return srq->ibsrq.srq_type;
+}
+
+static inline struct rxe_xrcd *rxe_srq_xrcd(struct rxe_srq *srq)
+{
+	return to_rxrcd(srq->ibsrq.ext.xrc.xrcd);
+}
+
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 void rxe_mc_cleanup(struct rxe_pool_entry *arg);
-- 
2.30.2
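
For context (not part of the patch itself): once the rest of the XRC
series is in place (XRCD and XRC QP support), the path this change
enables can be exercised from userspace through rdma-core roughly as in
the sketch below. The device choice, queue sizes and the file backing
the XRC domain are illustrative assumptions only, and error handling is
abbreviated.

/* Hypothetical usage sketch, not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);

	/* An XRC domain is shared between processes via a file. */
	struct ibv_xrcd_init_attr xrcd_attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = open("/tmp/rxe-xrcd", O_RDWR | O_CREAT, 0600),
		.oflags = O_CREAT,
	};
	struct ibv_xrcd *xrcd = ibv_open_xrcd(ctx, &xrcd_attr);

	/* An XRC SRQ takes a PD, a CQ and an XRCD, not just a PD. */
	struct ibv_srq_init_attr_ex srq_attr = {
		.attr = { .max_wr = 64, .max_sge = 1 },
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_PD |
			     IBV_SRQ_INIT_ATTR_XRCD | IBV_SRQ_INIT_ATTR_CQ,
		.srq_type = IBV_SRQT_XRC,
		.pd = pd,
		.xrcd = xrcd,
		.cq = cq,
	};
	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &srq_attr);
	if (!srq) {
		perror("ibv_create_srq_ex");
		return 1;
	}

	/* The number a remote XRC initiator places in its work requests;
	 * rxe_srq_from_init() above fills it from the SRQ's pool index.
	 */
	uint32_t srq_num;
	ibv_get_srq_num(srq, &srq_num);
	printf("XRC srq_num = %u\n", srq_num);

	ibv_destroy_srq(srq);
	return 0;
}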