Re: [PATCH v3 rdma-next 3/3] qedr: Add user space support for SRQ


 



On Mon, Jul 23, 2018 at 04:30:03PM +0300, Yuval Bason wrote:
> This patch adds support for SRQs created in user space and updates
> qedr_affiliated_event to deal with general SRQ events.
>
> Signed-off-by: Michal Kalderon <michal.kalderon@xxxxxxxxxx>
> Signed-off-by: Yuval Bason <yuval.bason@xxxxxxxxxx>
> ---
>  drivers/infiniband/hw/qedr/main.c  |  95 ++++++++++++++++++-------
>  drivers/infiniband/hw/qedr/verbs.c | 137 +++++++++++++++++++++++++++++++------
>  include/uapi/rdma/qedr-abi.h       |  17 +++++
>  3 files changed, 205 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
> index 2642caf..51e4f7e 100644
> --- a/drivers/infiniband/hw/qedr/main.c
> +++ b/drivers/infiniband/hw/qedr/main.c
> @@ -191,6 +191,11 @@ static int qedr_register_device(struct qedr_dev *dev)
>  				     QEDR_UVERBS(MODIFY_QP) |
>  				     QEDR_UVERBS(QUERY_QP) |
>  				     QEDR_UVERBS(DESTROY_QP) |
> +				     QEDR_UVERBS(CREATE_SRQ) |
> +				     QEDR_UVERBS(DESTROY_SRQ) |
> +				     QEDR_UVERBS(QUERY_SRQ) |
> +				     QEDR_UVERBS(MODIFY_SRQ) |
> +				     QEDR_UVERBS(POST_SRQ_RECV) |
>  				     QEDR_UVERBS(REG_MR) |
>  				     QEDR_UVERBS(DEREG_MR) |
>  				     QEDR_UVERBS(POLL_CQ) |
> @@ -658,42 +663,69 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
>  #define EVENT_TYPE_NOT_DEFINED	0
>  #define EVENT_TYPE_CQ		1
>  #define EVENT_TYPE_QP		2
> +#define EVENT_TYPE_SRQ		3
>  	struct qedr_dev *dev = (struct qedr_dev *)context;
>  	struct regpair *async_handle = (struct regpair *)fw_handle;
>  	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
>  	u8 event_type = EVENT_TYPE_NOT_DEFINED;
>  	struct ib_event event;
> +	struct ib_srq *ibsrq;
> +	struct qedr_srq *srq;
>  	struct ib_cq *ibcq;
>  	struct ib_qp *ibqp;
>  	struct qedr_cq *cq;
>  	struct qedr_qp *qp;
> +	u16 srq_id;
>
> -	switch (e_code) {
> -	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
> -		event.event = IB_EVENT_CQ_ERR;
> -		event_type = EVENT_TYPE_CQ;
> -		break;
> -	case ROCE_ASYNC_EVENT_SQ_DRAINED:
> -		event.event = IB_EVENT_SQ_DRAINED;
> -		event_type = EVENT_TYPE_QP;
> -		break;
> -	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
> -		event.event = IB_EVENT_QP_FATAL;
> -		event_type = EVENT_TYPE_QP;
> -		break;
> -	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
> -		event.event = IB_EVENT_QP_REQ_ERR;
> -		event_type = EVENT_TYPE_QP;
> -		break;
> -	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
> -		event.event = IB_EVENT_QP_ACCESS_ERR;
> -		event_type = EVENT_TYPE_QP;
> -		break;
> -	default:
> +	if (!IS_IWARP(dev)) {

Please structure this as if (IS_IWARP(dev)) { ... } else { ... }, without the extra "!".
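
Roughly like this (just a sketch of the requested control flow, reusing the
cases already in the patch):

	if (IS_IWARP(dev)) {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		/* ... remaining iWARP cases and default ... */
		}
	} else {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		/* ... remaining RoCE cases and default ... */
		}
	}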

> +		switch (e_code) {
> +		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
> +			event.event = IB_EVENT_CQ_ERR;
> +			event_type = EVENT_TYPE_CQ;
> +			break;
> +		case ROCE_ASYNC_EVENT_SQ_DRAINED:
> +			event.event = IB_EVENT_SQ_DRAINED;
> +			event_type = EVENT_TYPE_QP;
> +			break;
> +		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
> +			event.event = IB_EVENT_QP_FATAL;
> +			event_type = EVENT_TYPE_QP;
> +			break;
> +		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
> +			event.event = IB_EVENT_QP_REQ_ERR;
> +			event_type = EVENT_TYPE_QP;
> +			break;
> +		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
> +			event.event = IB_EVENT_QP_ACCESS_ERR;
> +			event_type = EVENT_TYPE_QP;
> +			break;
> +		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
> +			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
> +			event_type = EVENT_TYPE_SRQ;
> +			break;
> +		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
> +			event.event = IB_EVENT_SRQ_ERR;
> +			event_type = EVENT_TYPE_SRQ;
> +			break;
> +		default:
> +			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
> +			       e_code, roce_handle64);
> +		}
> +	} else {
> +		switch (e_code) {
> +		case QED_IWARP_EVENT_SRQ_LIMIT:
> +			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
> +			event_type = EVENT_TYPE_SRQ;
> +			break;
> +		case QED_IWARP_EVENT_SRQ_EMPTY:
> +			event.event = IB_EVENT_SRQ_ERR;
> +			event_type = EVENT_TYPE_SRQ;
> +			break;
> +		default:
>  		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
>  		       roce_handle64);
> +		}
>  	}
> -
>  	switch (event_type) {
>  	case EVENT_TYPE_CQ:
>  		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
> @@ -727,6 +759,23 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
>  		}
>  		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
>  		break;
> +	case EVENT_TYPE_SRQ:
> +		srq_id = (u16)roce_handle64;
> +		srq = idr_find(&dev->srqidr.idr, srq_id);

You should wrap the idr_find() call with rcu_read_lock()/rcu_read_unlock().
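
Something along these lines (a sketch only; the read-side critical section
needs to cover the use of the looked-up srq, or a reference has to be taken
before unlocking):

	rcu_read_lock();
	srq = idr_find(&dev->srqidr.idr, srq_id);
	if (srq) {
		/* dispatch the SRQ event while the lookup is still protected */
		...
	}
	rcu_read_unlock();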

> +		if (srq) {
> +			ibsrq = &srq->ibsrq;
> +			if (ibsrq->event_handler) {
> +				event.device = ibsrq->device;
> +				event.element.srq = ibsrq;
> +				ibsrq->event_handler(&event,
> +						     ibsrq->srq_context);
> +			}
> +		} else {
> +			DP_NOTICE(dev,
> +				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
> +				  roce_handle64);
> +		}
> +		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
>  	default:
>  		break;
>  	}
> diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
> index e48861a..04544be 100644
> --- a/drivers/infiniband/hw/qedr/verbs.c
> +++ b/drivers/infiniband/hw/qedr/verbs.c
> @@ -1199,6 +1199,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
>  	return 0;
>  }
>
> +static int qedr_copy_srq_uresp(struct qedr_dev *dev,
> +			       struct qedr_srq *srq, struct ib_udata *udata)
> +{
> +	struct qedr_create_srq_uresp uresp = {};
> +	int rc;
> +
> +	uresp.srq_id = srq->srq_id;
> +
> +	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
> +	if (rc)
> +		DP_ERR(dev, "create srq: problem copying data to user space\n");
> +
> +	return rc;
> +}
> +
>  static void qedr_copy_rq_uresp(struct qedr_dev *dev,
>  			       struct qedr_create_qp_uresp *uresp,
>  			       struct qedr_qp *qp)
> @@ -1320,6 +1335,13 @@ static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
>  	return 0;
>  }
>
> +static void qedr_free_srq_user_params(struct qedr_srq *srq)
> +{
> +	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
> +	ib_umem_release(srq->usrq.umem);
> +	ib_umem_release(srq->prod_umem);
> +}
> +
>  static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
>  {
>  	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
> @@ -1332,6 +1354,37 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
>  			  hw_srq->phy_prod_pair_addr);
>  }
>
> +static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
> +				     struct qedr_srq *srq,
> +				     struct qedr_create_srq_ureq *ureq,
> +				     int access, int dmasync)
> +{
> +	struct scatterlist *sg;
> +	int rc;
> +
> +	rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
> +				  ureq->srq_len, access, dmasync, 1);
> +	if (rc)
> +		return rc;
> +
> +	srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
> +				     sizeof(struct rdma_srq_producers),
> +				     access, dmasync);
> +	if (IS_ERR(srq->prod_umem)) {
> +		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
> +		ib_umem_release(srq->usrq.umem);
> +		DP_ERR(srq->dev,
> +		       "create srq: failed ib_umem_get for producer, got %ld\n",
> +		       PTR_ERR(srq->prod_umem));
> +		return PTR_ERR(srq->prod_umem);
> +	}
> +
> +	sg = srq->prod_umem->sg_head.sgl;
> +	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
> +
> +	return 0;
> +}
> +
>  static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
>  					struct qedr_dev *dev,
>  					struct ib_srq_init_attr *init_attr)
> @@ -1393,10 +1446,12 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
>  	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
>  	struct qed_rdma_create_srq_out_params out_params;
>  	struct qedr_pd *pd = get_qedr_pd(ibpd);
> +	struct qedr_create_srq_ureq ureq = {};
>  	u64 pbl_base_addr, phy_prod_pair_addr;
> +	struct ib_ucontext *ib_ctx = NULL;
>  	struct qedr_srq_hwq_info *hw_srq;
> +	struct qedr_ucontext *ctx = NULL;
>  	u32 page_cnt, page_size;
> -	struct qed_chain *pbl;
>  	struct qedr_srq *srq;
>  	int rc = 0;
>
> @@ -1419,15 +1474,38 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
>  	hw_srq->max_wr = init_attr->attr.max_wr;
>  	hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
>
> -	rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
> -	if (rc)
> -		goto err0;
> +	if (udata && ibpd->uobject && ibpd->uobject->context) {
> +		ib_ctx = ibpd->uobject->context;
> +		ctx = get_qedr_ucontext(ib_ctx);
> +
> +		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
> +			DP_ERR(dev,
> +			       "create srq: problem copying data from user space\n");
> +			goto err0;
> +		}
> +
> +		rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
> +		if (rc)
> +			goto err0;
> +
> +		page_cnt = srq->usrq.pbl_info.num_pbes;
> +		pbl_base_addr = srq->usrq.pbl_tbl->pa;
> +		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
> +		page_size = BIT(srq->usrq.umem->page_shift);
> +	} else {
> +		struct qed_chain *pbl;
> +
> +		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
> +		if (rc)
> +			goto err0;
> +
> +		pbl = &hw_srq->pbl;
> +		page_cnt = qed_chain_get_page_cnt(pbl);
> +		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
> +		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
> +		page_size = QED_CHAIN_PAGE_SIZE;
> +	}
>
> -	pbl = &hw_srq->pbl;
> -	page_cnt = qed_chain_get_page_cnt(pbl);
> -	pbl_base_addr = qed_chain_get_pbl_phys(pbl);
> -	phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
> -	page_size = QED_CHAIN_PAGE_SIZE;
>  	in_params.pd_id = pd->pd_id;
>  	in_params.pbl_base_addr = pbl_base_addr;
>  	in_params.prod_pair_addr = phy_prod_pair_addr;
> @@ -1440,6 +1518,12 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
>
>  	srq->srq_id = out_params.srq_id;
>
> +	if (udata) {
> +		rc = qedr_copy_srq_uresp(dev, srq, udata);
> +		if (rc)
> +			goto err2;
> +	}
> +
>  	rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
>  	if (rc)
>  		goto err2;
> @@ -1455,7 +1539,10 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
>  	/* Intentionally ignore return value, keep the original rc */
>  	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
>  err1:
> -	qedr_free_srq_kernel_params(srq);
> +	if (udata)
> +		qedr_free_srq_user_params(srq);
> +	else
> +		qedr_free_srq_kernel_params(srq);
>  err0:
>  	kfree(srq);
>
> @@ -1471,7 +1558,10 @@ int qedr_destroy_srq(struct ib_srq *ibsrq)
>  	in_params.srq_id = srq->srq_id;
>  	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
>
> -	qedr_free_srq_kernel_params(srq);
> +	if (ibsrq->pd->uobject)
> +		qedr_free_srq_user_params(srq);
> +	else
> +		qedr_free_srq_kernel_params(srq);
>
>  	qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
>
> @@ -1599,9 +1689,10 @@ static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
>
>  	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
>  			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
> -
> -	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
> -	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
> +	if (!qp->srq) {
> +		qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
> +		qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
> +	}
>
>  	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
>  			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
> @@ -1647,11 +1738,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
>  	if (rc)
>  		return rc;
>
> -	/* RQ - read access only (0), dma sync not required (0) */
> -	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
> -				  ureq.rq_len, 0, 0, alloc_and_init);
> -	if (rc)
> -		return rc;
> +	if (!qp->srq) {
> +		/* RQ - read access only (0), dma sync not required (0) */
> +		rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
> +					  ureq.rq_len, 0, 0, alloc_and_init);
> +		if (rc)
> +			return rc;
> +	}
>
>  	memset(&in_params, 0, sizeof(in_params));
>  	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
> @@ -1659,8 +1752,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
>  	in_params.qp_handle_hi = ureq.qp_handle_hi;
>  	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
>  	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
> -	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
> -	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
> +	if (!qp->srq) {
> +		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
> +		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
> +	}
>
>  	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
>  					      &in_params, &out_params);
> diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
> index 24c658b..2189fb4 100644
> --- a/include/uapi/rdma/qedr-abi.h
> +++ b/include/uapi/rdma/qedr-abi.h
> @@ -111,4 +111,21 @@ struct qedr_create_qp_uresp {
>  	__u32 reserved;
>  };
>
> +struct qedr_create_srq_ureq {
> +	/* user space virtual address of producer pair */
> +	__u64 prod_pair_addr;
> +
> +	/* user space virtual address of SRQ buffer */
> +	__u64 srq_addr;
> +
> +	/* length of SRQ buffer */
> +	__u64 srq_len;
> +};
> +
> +struct qedr_create_srq_uresp {
> +	__u16 srq_id;
> +	__u16 reserved0;
> +	__u32 reserved1;
> +};
> +
>  #endif /* __QEDR_USER_H__ */
> --
> 1.8.3.1
>
