---
V1->V2: Fix the build warning by marking irdma_reg_user_mr_dmabuf() static
---
drivers/infiniband/hw/irdma/verbs.c | 158 ++++++++++++++++++++++++++++
1 file changed, 158 insertions(+)
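
For context, the new verb is the kernel side of the dmabuf MR registration path; from userspace it would typically be reached through rdma-core's ibv_reg_dmabuf_mr(). A minimal, hypothetical caller sketch (the dmabuf fd, length, iova and access flags below are placeholders, not part of this patch):

    #include <infiniband/verbs.h>

    /* Hypothetical helper: register a pinned dmabuf-backed MR. */
    static struct ibv_mr *reg_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
                                        size_t len, uint64_t iova)
    {
            /* offset 0 into the dma-buf; write access chosen as an example */
            return ibv_reg_dmabuf_mr(pd, 0, len, iova, dmabuf_fd,
                                     IBV_ACCESS_LOCAL_WRITE |
                                     IBV_ACCESS_REMOTE_WRITE);
    }
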
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f6973ea55eda..1572baa93856 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2912,6 +2912,163 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return ERR_PTR(err);
}
+static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 len, u64 virt,
+ int fd, int access,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_ucontext *ucontext;
+ struct irdma_pble_alloc *palloc;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ struct irdma_mem_reg_req req;
+ u32 total, stag = 0;
+ u8 shadow_pgcnt = 1;
+ bool use_pbles = false;
+ unsigned long flags;
+ int err = -EINVAL;
+ struct ib_umem_dmabuf *umem_dmabuf;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd,
+ access);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ return ERR_PTR(err);
+ }
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = &umem_dmabuf->umem;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->page_size = PAGE_SIZE;
+
+ if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
+ iwmr->page_size = ib_umem_find_best_pgsz(iwmr->region,
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap,
+ virt);
+ if (unlikely(!iwmr->page_size)) {
+ ib_umem_release(iwmr->region);
+ kfree(iwmr);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ }
+ iwmr->len = iwmr->region->length;
+ iwpbl->user_base = virt;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->type = req.reg_type;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(iwmr->region, iwmr->page_size);
+
+ switch (req.reg_type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt) {
+ err = -EINVAL;
+ goto error;
+ }
+ total = req.sq_pages + req.rq_pages;
+ use_pbles = (total > 2);
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ use_pbles = (req.cq_pages > 1);
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;