Now that ib_udata is passed to all of the drivers' object create/destroy APIs and carries the ib_ucontext for every user command, there is no need to also pass the ib_ucontext through the function prototypes. Make ib_udata the only argument passed.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@xxxxxxxxxx>
---
 drivers/infiniband/core/cq.c | 2 +-
 drivers/infiniband/core/uverbs_cmd.c | 8 +--
 drivers/infiniband/core/uverbs_std_types_cq.c | 3 +-
 drivers/infiniband/core/verbs.c | 6 +-
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 26 ++++----
 drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4 +-
 drivers/infiniband/hw/cxgb3/iwch_provider.c | 19 +++---
 drivers/infiniband/hw/cxgb4/cq.c | 12 ++--
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 -
 drivers/infiniband/hw/cxgb4/provider.c | 5 +-
 drivers/infiniband/hw/hns/hns_roce_cq.c | 24 ++++---
 drivers/infiniband/hw/hns/hns_roce_device.h | 4 +-
 drivers/infiniband/hw/hns/hns_roce_pd.c | 5 +-
 drivers/infiniband/hw/i40iw/i40iw_verbs.c | 26 ++++----
 drivers/infiniband/hw/mlx4/cq.c | 27 ++++----
 drivers/infiniband/hw/mlx4/doorbell.c | 9 ++-
 drivers/infiniband/hw/mlx4/main.c | 6 +-
 drivers/infiniband/hw/mlx4/mlx4_ib.h | 4 +-
 drivers/infiniband/hw/mlx4/qp.c | 2 +-
 drivers/infiniband/hw/mlx4/srq.c | 3 +-
 drivers/infiniband/hw/mlx5/cq.c | 50 ++++++++-------
 drivers/infiniband/hw/mlx5/main.c | 20 +++---
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 4 +-
 drivers/infiniband/hw/mlx5/qp.c | 3 +-
 drivers/infiniband/hw/mthca/mthca_provider.c | 48 +++++++-------
 drivers/infiniband/hw/nes/nes_verbs.c | 37 ++++++-----
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 62 +++++++++++++------
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 4 +-
 drivers/infiniband/hw/qedr/verbs.c | 25 +++++---
 drivers/infiniband/hw/qedr/verbs.h | 4 +-
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 4 +-
 drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 4 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 15 +++--
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 16 +++--
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 4 +-
 drivers/infiniband/sw/rdmavt/cq.c | 4 +-
 drivers/infiniband/sw/rdmavt/cq.h | 1 -
 drivers/infiniband/sw/rdmavt/mmap.c | 14 ++++-
 drivers/infiniband/sw/rdmavt/mmap.h | 2 +-
 drivers/infiniband/sw/rdmavt/pd.c | 4 +-
 drivers/infiniband/sw/rdmavt/pd.h | 3 +-
 drivers/infiniband/sw/rdmavt/qp.c | 5 +-
 drivers/infiniband/sw/rdmavt/srq.c | 5 +-
 drivers/infiniband/sw/rxe/rxe_cq.c | 10 +--
 drivers/infiniband/sw/rxe/rxe_loc.h | 12 ++--
 drivers/infiniband/sw/rxe/rxe_mmap.c | 12 +++-
 drivers/infiniband/sw/rxe/rxe_qp.c | 14 ++---
 drivers/infiniband/sw/rxe/rxe_queue.c | 8 +--
 drivers/infiniband/sw/rxe/rxe_queue.h | 4 +-
 drivers/infiniband/sw/rxe/rxe_srq.c | 12 ++--
 drivers/infiniband/sw/rxe/rxe_verbs.c | 14 ++---
 include/rdma/ib_verbs.h | 5 +-
 52 files changed, 333 insertions(+), 292 deletions(-)

diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 4797eef549c3..a4c81992267c 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -147,7 +147,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, struct ib_cq *cq; int ret = -ENOMEM; - cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL); + cq = dev->ops.create_cq(dev, &cq_attr, NULL); if (IS_ERR(cq)) return cq; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index df208eee540a..b96fab49bea9 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -423,7 +423,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
atomic_set(&pd->usecnt, 0); pd->res.type = RDMA_RESTRACK_PD; - ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata); + ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata); if (ret) goto err_alloc; @@ -594,8 +594,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) } if (!xrcd) { - xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context, - &attrs->driver_udata); + xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata); if (IS_ERR(xrcd)) { ret = PTR_ERR(xrcd); goto err; @@ -1010,8 +1009,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs, attr.comp_vector = cmd->comp_vector; attr.flags = cmd->flags; - cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, - &attrs->driver_udata); + cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_file; diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c index cde608c268ff..977e386009fc 100644 --- a/drivers/infiniband/core/uverbs_std_types_cq.c +++ b/drivers/infiniband/core/uverbs_std_types_cq.c @@ -111,8 +111,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->async_list); - cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, - &attrs->driver_udata); + cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_event_file; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 748663132217..a8459be94caf 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -269,7 +269,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, pd->res.type = RDMA_RESTRACK_PD; rdma_restrack_set_task(&pd->res, caller); - ret = device->ops.alloc_pd(pd, NULL, NULL); + ret = device->ops.alloc_pd(pd, NULL); if (ret) { kfree(pd); return ERR_PTR(ret); @@ -1912,7 +1912,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device, { struct ib_cq *cq; - cq = device->ops.create_cq(device, cq_attr, NULL, NULL); + cq = device->ops.create_cq(device, cq_attr, NULL); if (!IS_ERR(cq)) { cq->device = device; @@ -2145,7 +2145,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) if (!device->ops.alloc_xrcd) return ERR_PTR(-EOPNOTSUPP); - xrcd = device->ops.alloc_xrcd(device, NULL, NULL); + xrcd = device->ops.alloc_xrcd(device, NULL); if (!IS_ERR(xrcd)) { xrcd->device = device; xrcd->inode = NULL; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a1b9ebda2d60..8c5678147521 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -576,14 +576,14 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) &pd->qplib_pd); } -int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext, - struct ib_udata *udata) +int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct bnxt_re_ucontext *ucntx = container_of(ucontext, - struct bnxt_re_ucontext, - ib_uctx); + struct bnxt_re_ucontext *ucntx = + rdma_udata_to_drv_context(udata, + struct bnxt_re_ucontext, + ib_uctx); struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); int rc; @@ -2587,7 +2587,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct ib_cq 
*bnxt_re_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); @@ -2614,12 +2613,13 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, if (entries > dev_attr->max_cq_wqes + 1) entries = dev_attr->max_cq_wqes + 1; - if (context) { + if (udata) { struct bnxt_re_cq_req req; - struct bnxt_re_ucontext *uctx = container_of - (context, - struct bnxt_re_ucontext, - ib_uctx); + struct bnxt_re_ucontext *uctx = + rdma_udata_to_drv_context( + udata, + struct bnxt_re_ucontext, + ib_uctx); if (ib_copy_from_udata(&req, udata, sizeof(req))) { rc = -EFAULT; goto fail; @@ -2671,7 +2671,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, atomic_inc(&rdev->cq_count); spin_lock_init(&cq->cq_lock); - if (context) { + if (udata) { struct bnxt_re_cq_resp resp; resp.cqid = cq->qplib_cq.id; @@ -2689,7 +2689,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, return &cq->ib_cq; c2fail: - if (context) + if (udata) ib_umem_release(cq->umem); fail: kfree(cq->cql); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 44e49988600e..488dc735a260 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -163,8 +163,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, int index, union ib_gid *gid); enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, u8 port_num); -int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, @@ -197,7 +196,6 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 801beabf7db9..7fe3b984c575 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -106,7 +106,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata) { int entries = attr->cqe; @@ -114,7 +113,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, struct iwch_cq *chp; struct iwch_create_cq_resp uresp; struct iwch_create_cq_req ureq; - struct iwch_ucontext *ucontext = NULL; static int warned; size_t resplen; @@ -127,8 +125,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, if (!chp) return ERR_PTR(-ENOMEM); - if (ib_context) { - ucontext = to_iwch_ucontext(ib_context); + if (udata) { if (!t3a_device(rhp)) { if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) { kfree(chp); @@ -154,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, entries = roundup_pow_of_two(entries); chp->cq.size_log2 = ilog2(entries); - if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { + if 
(cxio_create_cq(&rhp->rdev, &chp->cq, !udata)) { kfree(chp); return ERR_PTR(-ENOMEM); } @@ -170,8 +167,13 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, return ERR_PTR(-ENOMEM); } - if (ucontext) { + if (udata) { struct iwch_mm_entry *mm; + struct iwch_ucontext *ucontext = + rdma_udata_to_drv_context( + udata, + struct iwch_ucontext, + ibucontext); mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) { @@ -378,8 +380,7 @@ static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); } -static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct iwch_pd *php = to_iwch_pd(pd); struct ib_device *ibdev = pd->device; @@ -394,7 +395,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, php->pdid = pdid; php->rhp = rhp; - if (context) { + if (udata) { struct iwch_alloc_pd_resp resp = {.pdid = php->pdid}; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 6a6eb2cd69be..bbfe75b63961 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -994,7 +994,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata) { int entries = attr->cqe; @@ -1003,10 +1002,14 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, struct c4iw_cq *chp; struct c4iw_create_cq ucmd; struct c4iw_create_cq_resp uresp; - struct c4iw_ucontext *ucontext = NULL; int ret, wr_len; size_t memsize, hwentries; struct c4iw_mm_entry *mm, *mm2; + struct c4iw_ucontext *ucontext = + rdma_udata_to_drv_context( + udata, + struct c4iw_ucontext, + ibucontext); pr_debug("ib_dev %p entries %d\n", ibdev, entries); if (attr->flags) @@ -1017,8 +1020,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, if (vector >= rhp->rdev.lldi.nciq) return ERR_PTR(-EINVAL); - if (ib_context) { - ucontext = to_c4iw_ucontext(ib_context); + if (udata) { if (udata->inlen < sizeof(ucmd)) ucontext->is_32b_cqe = 1; } @@ -1070,7 +1072,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, /* * memsize must be a multiple of the page size if its a user cq. 
*/ - if (ucontext) + if (udata) memsize = roundup(memsize, PAGE_SIZE); chp->cq.size = hwentries; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 2b66cf6108d7..9bb93fddb0ae 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1056,7 +1056,6 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_context, struct ib_udata *udata); int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 12f7d3ae6a53..0fbad47661cc 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -204,8 +204,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata) mutex_unlock(&rhp->rdev.stats.lock); } -static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata) { struct c4iw_pd *php = to_c4iw_pd(pd); struct ib_device *ibdev = pd->device; @@ -220,7 +219,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, php->pdid = pdid; php->rhp = rhp; - if (context) { + if (udata) { struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index e3a25516a6f4..9dd9f5d30b33 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -309,7 +309,6 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); @@ -321,6 +320,11 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, int vector = attr->comp_vector; int cq_entries = attr->cqe; int ret; + struct hns_roce_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct hns_roce_ucontext, + ibucontext); if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { dev_err(dev, "Creat CQ failed. 
entries=%d, max=%d\n", @@ -339,7 +343,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, hr_cq->ib_cq.cqe = cq_entries - 1; spin_lock_init(&hr_cq->lock); - if (context) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "Failed to copy_from_udata.\n"); ret = -EFAULT; @@ -357,7 +361,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp))) { - ret = hns_roce_db_map_user(to_hr_ucontext(context), + ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, &hr_cq->db); if (ret) { @@ -369,7 +373,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, } /* Get user space parameters */ - uar = &to_hr_ucontext(context)->uar; + uar = context->uar; } else { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); @@ -408,7 +412,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, * problems if tptr is set to zero here, so we initialze it in user * space. */ - if (!context && hr_cq->tptr_addr) + if (!udata && hr_cq->tptr_addr) *hr_cq->tptr_addr = 0; /* Get created cq handler and carry out event */ @@ -416,7 +420,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, hr_cq->event = hns_roce_ib_cq_event; hr_cq->cq_depth = cq_entries; - if (context) { + if (udata) { resp.cqn = hr_cq->cqn; ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) @@ -429,21 +433,21 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, hns_roce_free_cq(hr_dev, hr_cq); err_dbmap: - if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp))) - hns_roce_db_unmap_user(to_hr_ucontext(context), + hns_roce_db_unmap_user(context, &hr_cq->db); err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); - if (context) + if (udata) ib_umem_release(hr_cq->umem); else hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe); err_db: - if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) + if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_cq->db); err_cq: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index d0eafe12c1fc..1cf20aca3f36 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1114,8 +1114,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata); -int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); @@ -1179,7 +1178,6 @@ int to_hr_qp_type(int qp_type); struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 504e6e466d72..813401384d78 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -57,8 +57,7 @@ 
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); } -int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ib_dev = ibpd->device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); @@ -72,7 +71,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, return ret; } - if (context) { + if (udata) { struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 2f0b3ce13872..72d437c2eda8 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -291,18 +291,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_ /** * i40iw_alloc_pd - allocate protection domain * @pd: PD pointer - * @context: user context created during alloc * @udata: user data */ -static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_device *iwdev = to_iwdev(pd->device); struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_alloc_pd_resp uresp; struct i40iw_sc_pd *sc_pd; - struct i40iw_ucontext *ucontext; u32 pd_id = 0; int err; @@ -318,8 +315,12 @@ static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, sc_pd = &iwpd->sc_pd; - if (context) { - ucontext = to_ucontext(context); + if (udata) { + struct i40iw_ucontext *ucontext = + rdma_udata_to_drv_context( + udata, + struct i40iw_ucontext, + ibucontext); dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); memset(&uresp, 0, sizeof(uresp)); uresp.pd_id = pd_id; @@ -1091,12 +1092,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) * i40iw_create_cq - create cq * @ibdev: device pointer from stack * @attr: attributes for cq - * @context: user context created during alloc * @udata: user data */ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct i40iw_device *iwdev = to_iwdev(ibdev); @@ -1146,14 +1145,17 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, info.ceq_id_valid = true; info.ceqe_mask = 1; info.type = I40IW_CQ_TYPE_IWARP; - if (context) { - struct i40iw_ucontext *ucontext; + if (udata) { + struct i40iw_ucontext *ucontext = + rdma_udata_to_drv_context( + udata, + struct i40iw_ucontext, + ibucontext); struct i40iw_create_cq_req req; struct i40iw_cq_mr *cqmr; memset(&req, 0, sizeof(req)); iwcq->user_mode = true; - ucontext = to_ucontext(context); if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) { err_code = -EFAULT; goto cq_free_resources; @@ -1223,7 +1225,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, goto cq_free_resources; } - if (context) { + if (udata) { struct i40iw_create_cq_resp resp; memset(&resp, 0, sizeof(resp)); diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5403a1ff7cc2..b48ffdb2fc77 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -174,7 +174,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata, #define CQ_CREATE_FLAGS_SUPPORTED 
IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -184,6 +183,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, struct mlx4_uar *uar; void *buf_addr; int err; + struct mlx4_ib_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mlx4_ib_ucontext, + ibucontext); if (entries < 1 || entries > dev->dev->caps.max_cqes) return ERR_PTR(-EINVAL); @@ -205,7 +209,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->send_qp_list); INIT_LIST_HEAD(&cq->recv_qp_list); - if (context) { + if (udata) { struct mlx4_ib_create_cq ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { @@ -219,12 +223,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, if (err) goto err_cq; - err = mlx4_ib_db_map_user(to_mucontext(context), udata, - ucmd.db_addr, &cq->db); + err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); if (err) goto err_mtt; - uar = &to_mucontext(context)->uar; + uar = &context->uar; cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS; } else { err = mlx4_db_alloc(dev->dev, &cq->db, 1); @@ -253,17 +256,17 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, cq->db.dma, &cq->mcq, vector, 0, !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION), - buf_addr, !!context); + buf_addr, !!udata); if (err) goto err_dbmap; - if (context) + if (udata) cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; else cq->mcq.comp = mlx4_ib_cq_comp; cq->mcq.event = mlx4_ib_cq_event; - if (context) + if (udata) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { err = -EFAULT; goto err_cq_free; @@ -275,19 +278,19 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, mlx4_cq_free(dev->dev, &cq->mcq); err_dbmap: - if (context) - mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); + if (udata) + mlx4_ib_db_unmap_user(context, &cq->db); err_mtt: mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); - if (context) + if (udata) ib_umem_release(cq->umem); else mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); err_db: - if (!context) + if (!udata) mlx4_db_free(dev->dev, &cq->db); err_cq: diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 3aab71b29ce8..e775211df583 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c @@ -31,6 +31,7 @@ */ #include <linux/slab.h> +#include <rdma/uverbs_ioctl.h> #include "mlx4_ib.h" @@ -41,12 +42,16 @@ struct mlx4_ib_user_db_page { int refcnt; }; -int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, - struct ib_udata *udata, unsigned long virt, +int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, struct mlx4_db *db) { struct mlx4_ib_user_db_page *page; int err = 0; + struct mlx4_ib_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mlx4_ib_ucontext, + ibucontext); mutex_lock(&context->db_page_mutex); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e50f9de71119..952b1bac46db 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1177,8 +1177,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) } } -static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx4_ib_pd *pd = to_mpd(ibpd); struct ib_device 
*ibdev = ibpd->device; @@ -1188,7 +1187,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, if (err) return err; - if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); return -EFAULT; } @@ -1201,7 +1200,6 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) } static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, struct ib_udata *udata) { struct mlx4_ib_xrcd *xrcd; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 4567b38d3d91..812c4c1d0250 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -722,8 +722,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev) int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev); void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev); -int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, - struct ib_udata *udata, unsigned long virt, +int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, struct mlx4_db *db); void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db); @@ -747,7 +746,6 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 99ceffe5cfec..208a5645e5eb 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1042,7 +1042,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (qp_has_rq(init_attr)) { err = mlx4_ib_db_map_user( - context, udata, + udata, (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr : ucmd.wq.db_addr, &qp->db); diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 2a20205d1662..94c3c334a672 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -131,8 +131,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_mtt; - err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr, - &srq->db); + err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); if (err) goto err_mtt; } else { diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0f82cecf81d8..da7f03172a2a 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -679,8 +679,7 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format) } static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, - struct ib_ucontext *context, struct mlx5_ib_cq *cq, - int entries, u32 **cqb, + struct mlx5_ib_cq *cq, int entries, u32 **cqb, int *cqe_size, int *index, int *inlen) { struct mlx5_ib_create_cq ucmd = {}; @@ -691,6 +690,11 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, int ncont; void *cqc; int err; + struct mlx5_ib_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mlx5_ib_ucontext, + ibucontext); ucmdlen = udata->inlen < sizeof(ucmd) ? 
(sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd); @@ -715,7 +719,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, return err; } - err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr, + err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db); if (err) goto err_umem; @@ -740,7 +744,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = to_mucontext(context)->bfregi.sys_pages[0]; + *index = context->bfregi.sys_pages[0]; if (ucmd.cqe_comp_en == 1) { int mini_cqe_format; @@ -782,23 +786,29 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; } - MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid); + MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid); return 0; err_cqb: kvfree(*cqb); err_db: - mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + mlx5_ib_db_unmap_user(context, &cq->db); err_umem: ib_umem_release(cq->buf.umem); return err; } -static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) +static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) { - mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + struct mlx5_ib_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mlx5_ib_ucontext, + ibucontext); + + mlx5_ib_db_unmap_user(context, &cq->db); ib_umem_release(cq->buf.umem); } @@ -883,7 +893,6 @@ static void notify_soft_wc_handler(struct work_struct *work) struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -923,9 +932,9 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->list_send_qp); INIT_LIST_HEAD(&cq->list_recv_qp); - if (context) { - err = create_cq_user(dev, udata, context, cq, entries, - &cqb, &cqe_size, &index, &inlen); + if (udata) { + err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, + &index, &inlen); if (err) goto err_create; } else { @@ -962,7 +971,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); cq->mcq.irqn = irqn; - if (context) + if (udata) cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; else cq->mcq.comp = mlx5_ib_cq_comp; @@ -970,7 +979,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->wc_list); - if (context) + if (udata) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { err = -EFAULT; goto err_cmd; @@ -985,8 +994,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, err_cqb: kvfree(cqb); - if (context) - destroy_cq_user(cq, context); + if (udata) + destroy_cq_user(cq, udata); else destroy_cq_kernel(dev, cq); @@ -1001,15 +1010,10 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_cq *mcq = to_mcq(cq); - struct ib_ucontext *context = - container_of( - udata, - struct uverbs_attr_bundle, - driver_udata)->context; mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); - if (context) - destroy_cq_user(mcq, context); + if (udata) + destroy_cq_user(mcq, udata); else destroy_cq_kernel(dev, mcq); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index dd59ff21c8e1..3a128a502477 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2334,8 +2334,7 @@ 
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) return 0; } -static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mlx5_ib_pd *pd = to_mpd(ibpd); struct ib_device *ibdev = ibpd->device; @@ -2344,8 +2343,13 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; u16 uid = 0; + struct mlx5_ib_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mlx5_ib_ucontext, + ibucontext); - uid = context ? to_mucontext(context)->devx_uid : 0; + uid = context ? context->devx_uid : 0; MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); MLX5_SET(alloc_pd_in, in, uid, uid); err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), @@ -2355,7 +2359,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, pd->pdn = MLX5_GET(alloc_pd_out, out, pd); pd->uid = uid; - if (context) { + if (udata) { resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); @@ -4742,11 +4746,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) devr->p0->uobject = NULL; atomic_set(&devr->p0->usecnt, 0); - ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL); + ret = mlx5_ib_alloc_pd(devr->p0, NULL); if (ret) goto error0; - devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL); if (IS_ERR(devr->c0)) { ret = PTR_ERR(devr->c0); goto error1; @@ -4758,7 +4762,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) devr->c0->cq_context = NULL; atomic_set(&devr->c0->usecnt, 0); - devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x0)) { ret = PTR_ERR(devr->x0); goto error2; @@ -4769,7 +4773,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) mutex_init(&devr->x0->tgt_qp_mutex); INIT_LIST_HEAD(&devr->x0->tgt_qp_list); - devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x1)) { ret = PTR_ERR(devr->x1); goto error3; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index b94f51dd3590..8cc3815ed28a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1083,7 +1083,6 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, int buflen, size_t *bc); struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); @@ -1125,8 +1124,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_mad_hdr *out, size_t *out_mad_size, u16 *out_mad_pkey_index); struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); + struct ib_udata *udata); int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); diff --git a/drivers/infiniband/hw/mlx5/qp.c 
b/drivers/infiniband/hw/mlx5/qp.c index 8af5d0cde533..195fdf8c6074 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -5631,8 +5631,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, } struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) + struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_xrcd *xrcd; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 825c739d2dfb..58695581aa08 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -363,18 +363,17 @@ static int mthca_mmap_uar(struct ib_ucontext *context, return 0; } -static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct mthca_pd *pd = to_mpd(ibpd); int err; - err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); + err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd); if (err) return err; - if (context) { + if (udata) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); return -EFAULT; @@ -634,7 +633,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -642,6 +640,11 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, struct mthca_cq *cq; int nent; int err; + struct mthca_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct mthca_ucontext, + ibucontext); if (attr->flags) return ERR_PTR(-EINVAL); @@ -649,19 +652,19 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return ERR_PTR(-EINVAL); - if (context) { + if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); - err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, - ucmd.set_db_index, ucmd.set_db_page); + err = mthca_map_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.set_db_index, + ucmd.set_db_page); if (err) return ERR_PTR(err); - err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, - ucmd.arm_db_index, ucmd.arm_db_page); + err = mthca_map_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.arm_db_index, + ucmd.arm_db_page); if (err) goto err_unmap_set; } @@ -672,7 +675,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, goto err_unmap_arm; } - if (context) { + if (udata) { cq->buf.mr.ibmr.lkey = ucmd.lkey; cq->set_ci_db_index = ucmd.set_db_index; cq->arm_db_index = ucmd.arm_db_index; @@ -681,14 +684,13 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, for (nent = 1; nent <= entries; nent <<= 1) ; /* nothing */ - err = mthca_init_cq(to_mdev(ibdev), nent, - context ? to_mucontext(context) : NULL, - context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, + err = mthca_init_cq(to_mdev(ibdev), nent, context, + udata ? 
ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, cq); if (err) goto err_free; - if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { + if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) { mthca_free_cq(to_mdev(ibdev), cq); err = -EFAULT; goto err_free; @@ -702,14 +704,14 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, kfree(cq); err_unmap_arm: - if (context) - mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, ucmd.arm_db_index); + if (udata) + mthca_unmap_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.arm_db_index); err_unmap_set: - if (context) - mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, - to_mucontext(context)->db_tab, ucmd.set_db_index); + if (udata) + mthca_unmap_user_db(to_mdev(ibdev), &context->uar, + context->db_tab, ucmd.set_db_index); return ERR_PTR(err); } diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index dcf1a6744b17..7d4d2951f3c5 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -642,21 +642,25 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) /** * nes_alloc_pd */ -static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata) +static int nes_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { struct ib_device *ibdev = pd->device; struct nes_pd *nespd = to_nespd(pd); struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; - struct nes_ucontext *nesucontext; struct nes_alloc_pd_resp uresp; u32 pd_num = 0; int err; + struct nes_ucontext *nesucontext = + rdma_udata_to_drv_context( + udata, + struct nes_ucontext, + ibucontext); nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n", - nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context, + nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, + ibdev, &nesucontext->ibucontext, netdev_refcnt_read(nesvnic->netdev)); err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, @@ -669,8 +673,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd; - if (context) { - nesucontext = to_nesucontext(context); + if (udata) { nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells, NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", @@ -1382,7 +1385,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) */ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -1428,8 +1430,13 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; - if (context) { - nes_ucontext = to_nesucontext(context); + if (udata) { + struct nes_ucontext *nes_ucontext = + rdma_udata_to_drv_context( + udata, + struct nes_ucontext, + ibucontext); + if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) { nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); kfree(nescq); @@ -1496,7 +1503,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { 
nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n"); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1525,7 +1532,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, if (nesadapter->free_4kpbl == 0) { spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); nes_free_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1547,7 +1554,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, if (nesadapter->free_256pbl == 0) { spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); nes_free_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1573,7 +1580,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16))); - if (context) { + if (udata) { if (pbl_entries != 1) u64temp = (u64)nespbl->pbl_pbase; else @@ -1604,7 +1611,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, nescq->hw_cq.cq_number, ret); if ((!ret) || (cqp_request->major_code)) { nes_put_cqp_request(nesdev, cqp_request); - if (!context) + if (!udata) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); else { @@ -1618,7 +1625,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, } nes_put_cqp_request(nesdev, cqp_request); - if (context) { + if (udata) { /* free the nespbl */ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, nespbl->pbl_pbase); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index ecfe5a9c6477..d1e0b0ca9061 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -47,6 +47,7 @@ #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> +#include <rdma/uverbs_ioctl.h> #include "ocrdma.h" #include "ocrdma_hw.h" @@ -367,6 +368,16 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) return status; } +/* + * NOTE: + * + * ocrdma_ucontext must be used here because this function is also + * called from ocrdma_alloc_ucontext where ib_udata does not have + * valid ib_ucontext pointer. ib_uverbs_get_context does not call + * uobj_{alloc|get_xxx} helpers which are used to store the + * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so + * ib_udata does NOT imply valid ib_ucontext here! 
+ */ static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd, struct ocrdma_ucontext *uctx, struct ib_udata *udata) @@ -593,7 +604,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) } static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) { int status; @@ -601,7 +611,11 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, u64 dpp_page_addr = 0; u32 db_page_size; struct ocrdma_alloc_pd_uresp rsp; - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + struct ocrdma_ucontext *uctx = + rdma_udata_to_drv_context( + udata, + struct ocrdma_ucontext, + ibucontext); memset(&rsp, 0, sizeof(rsp)); rsp.id = pd->id; @@ -639,18 +653,20 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, return status; } -int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); struct ocrdma_pd *pd; - struct ocrdma_ucontext *uctx = NULL; int status; u8 is_uctx_pd = false; + struct ocrdma_ucontext *uctx = + rdma_udata_to_drv_context( + udata, + struct ocrdma_ucontext, + ibucontext); - if (udata && context) { - uctx = get_ocrdma_ucontext(context); + if (udata) { pd = ocrdma_get_ucontext_pd(uctx); if (pd) { is_uctx_pd = true; @@ -664,8 +680,8 @@ int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, goto exit; pd_mapping: - if (udata && context) { - status = ocrdma_copy_pd_uresp(dev, pd, context, udata); + if (udata) { + status = ocrdma_copy_pd_uresp(dev, pd, udata); if (status) goto err; } @@ -946,13 +962,20 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) } static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, - struct ib_udata *udata, - struct ib_ucontext *ib_ctx) + struct ib_udata *udata) { int status; - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); + struct ocrdma_ucontext *uctx = + rdma_udata_to_drv_context( + udata, + struct ocrdma_ucontext, + ibucontext); struct ocrdma_create_cq_uresp uresp; + /* this must be user flow! 
*/ + if (!udata) + return -EINVAL; + memset(&uresp, 0, sizeof(uresp)); uresp.cq_id = cq->id; uresp.page_size = PAGE_ALIGN(cq->len); @@ -983,13 +1006,16 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) { int entries = attr->cqe; struct ocrdma_cq *cq; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); - struct ocrdma_ucontext *uctx = NULL; + struct ocrdma_ucontext *uctx = + rdma_udata_to_drv_context( + udata, + struct ocrdma_ucontext, + ibucontext); u16 pd_id = 0; int status; struct ocrdma_create_cq_ureq ureq; @@ -1011,18 +1037,16 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, INIT_LIST_HEAD(&cq->sq_head); INIT_LIST_HEAD(&cq->rq_head); - if (ib_ctx) { - uctx = get_ocrdma_ucontext(ib_ctx); + if (udata) pd_id = uctx->cntxt_pd->id; - } status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); if (status) { kfree(cq); return ERR_PTR(status); } - if (ib_ctx) { - status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx); + if (udata) { + status = ocrdma_copy_cq_uresp(dev, cq, udata); if (status) goto ctx_err; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index a449752b9eb7..184f88bc4ca8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -69,13 +69,11 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx); int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, - struct ib_udata *udata); +int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata); int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index c15d238afe2e..81e9c77459df 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -42,6 +42,7 @@ #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> +#include <rdma/uverbs_ioctl.h> #include <linux/qed/common_hsi.h> #include "qedr_hsi_rdma.h" @@ -436,8 +437,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot); } -int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct qedr_dev *dev = get_qedr_dev(ibdev); @@ -446,7 +446,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, int rc; DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n", - (udata && context) ? "User Lib" : "Kernel"); + udata ? 
"User Lib" : "Kernel"); if (!dev->rdma_ctx) { DP_ERR(dev, "invalid RDMA context\n"); @@ -459,10 +459,15 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, pd->pd_id = pd_id; - if (udata && context) { + if (udata) { struct qedr_alloc_pd_uresp uresp = { .pd_id = pd_id, }; + struct qedr_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct qedr_ucontext, + ibucontext); rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) { @@ -471,7 +476,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, return rc; } - pd->uctx = get_qedr_ucontext(context); + pd->uctx = context; pd->uctx->pd = pd; } @@ -816,9 +821,13 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) struct ib_cq *qedr_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata) + struct ib_udata *udata) { - struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx); + struct qedr_ucontext *ctx = + rdma_udata_to_drv_context( + udata, + struct qedr_ucontext, + ibucontext); struct qed_rdma_destroy_cq_out_params destroy_oparams; struct qed_rdma_destroy_cq_in_params destroy_iparams; struct qedr_dev *dev = get_qedr_dev(ibdev); @@ -906,7 +915,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, cq->sig = QEDR_CQ_MAGIC_NUMBER; spin_lock_init(&cq->cq_lock); - if (ib_ctx) { + if (udata) { rc = qedr_copy_cq_uresp(dev, cq, udata); if (rc) goto err3; diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index cd9659ac2aad..46a9828b9777 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -47,13 +47,11 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void qedr_dealloc_ucontext(struct ib_ucontext *uctx); int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, - struct ib_udata *udata); +int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_cq *qedr_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *ib_ctx, struct ib_udata *udata); int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index d8d81ba054fc..7379d965ee3f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -447,8 +447,7 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, return 0; } -int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); void *umem_pd; @@ -590,7 +589,6 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct ib_cq *cq; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 349c8dc13a12..028f322f8e9b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -50,8 +50,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid); int 
usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey); -int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata); +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, @@ -61,7 +60,6 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 5ba278324134..cf3560fb85e4 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -49,6 +49,7 @@ #include <rdma/ib_addr.h> #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> +#include <rdma/uverbs_ioctl.h> #include "pvrdma.h" @@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, * pvrdma_create_cq - create completion queue * @ibdev: the device * @attr: completion queue attributes - * @context: user context * @udata: user data * * @return: ib_cq completion queue pointer on success, @@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, */ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int entries = attr->cqe; @@ -116,6 +115,11 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; struct pvrdma_create_cq_resp cq_resp = {0}; struct pvrdma_create_cq ucmd; + struct pvrdma_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct pvrdma_ucontext, + ibucontext); BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); @@ -133,7 +137,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, } cq->ibcq.cqe = entries; - cq->is_kernel = !context; + cq->is_kernel = !udata; if (!cq->is_kernel) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { @@ -185,8 +189,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ; cmd->nchunks = npages; - cmd->ctx_handle = (context) ? - (u64)to_vucontext(context)->ctx_handle : 0; + cmd->ctx_handle = context ? context->ctx_handle : 0; cmd->cqe = entries; cmd->pdir_dma = cq->pdir.dir_dma; ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP); @@ -204,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); if (!cq->is_kernel) { - cq->uar = &(to_vucontext(context)->uar); + cq->uar = &context->uar; /* Copy udata back. 
*/ if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index e5b12c93218f..a69f435787b0 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -50,6 +50,7 @@ #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> #include <rdma/vmw_pvrdma-abi.h> +#include <rdma/uverbs_ioctl.h> #include "pvrdma.h" @@ -421,13 +422,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) /** * pvrdma_alloc_pd - allocate protection domain * @ibpd: PD pointer - * @context: user context * @udata: user data * * @return: the ib_pd protection domain pointer on success, otherwise errno. */ -int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct pvrdma_pd *pd = to_vpd(ibpd); @@ -438,13 +437,18 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; struct pvrdma_alloc_pd_resp pd_resp = {0}; int ret; + struct pvrdma_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct pvrdma_ucontext, + ibucontext); /* Check allowed max pds */ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd)) return -ENOMEM; cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD; - cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0; + cmd->ctx_handle = context ? context->ctx_handle : 0; ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP); if (ret < 0) { dev_warn(&dev->pdev->dev, @@ -453,12 +457,12 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, goto err; } - pd->privileged = !context; + pd->privileged = !udata; pd->pd_handle = resp->pd_handle; pd->pdn = resp->pd_handle; pd_resp.pdn = resp->pd_handle; - if (context) { + if (udata) { if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back protection domain\n"); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 2c8ba5bf8d0f..562b70e70e79 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -398,8 +398,7 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void pvrdma_dealloc_ucontext(struct ib_ucontext *context); -int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, @@ -412,7 +411,6 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 
6f7ff2384506..a06e6da7a026 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -168,7 +168,6 @@ static void send_complete(struct work_struct *work) * rvt_create_cq - create a completion queue * @ibdev: the device this completion queue is attached to * @attr: creation attributes - * @context: unused by the QLogic_IB driver * @udata: user data for libibverbs.so * * Called by ib_create_cq() in the generic verbs code. @@ -178,7 +177,6 @@ static void send_complete(struct work_struct *work) */ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); @@ -232,7 +230,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, if (udata && udata->outlen >= sizeof(__u64)) { int err; - cq->ip = rvt_create_mmap_info(rdi, sz, context, wc); + cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc); if (!cq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wc; diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h index e42661ecdef8..3ad6faf18ecb 100644 --- a/drivers/infiniband/sw/rdmavt/cq.h +++ b/drivers/infiniband/sw/rdmavt/cq.h @@ -53,7 +53,6 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index 6b712eecbd37..7a0d8b5b5fcf 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -49,6 +49,7 @@ #include <linux/vmalloc.h> #include <linux/mm.h> #include <asm/pgtable.h> +#include <rdma/uverbs_ioctl.h> #include "mmap.h" /** @@ -150,18 +151,21 @@ int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) * rvt_create_mmap_info - allocate information for hfi1_mmap * @rdi: rvt dev struct * @size: size in bytes to map - * @context: user context + * @udata: user data (must be valid!) 
* @obj: opaque pointer to a cq, wq etc * * Return: rvt_mmap struct on success */ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, - struct ib_ucontext *context, + struct ib_udata *udata, void *obj) { struct rvt_mmap_info *ip; + if (!udata) + return ERR_PTR(-EINVAL); + ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); if (!ip) return ip; @@ -177,7 +181,11 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, INIT_LIST_HEAD(&ip->pending_mmaps); ip->size = size; - ip->context = context; + ip->context = + container_of( + udata, + struct uverbs_attr_bundle, + driver_udata)->context; ip->obj = obj; kref_init(&ip->ref); diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h index fab0e7b1daf9..af01152e7b08 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.h +++ b/drivers/infiniband/sw/rdmavt/mmap.h @@ -55,7 +55,7 @@ void rvt_release_mmap_info(struct kref *ref); int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, - struct ib_ucontext *context, + struct ib_udata *udata, void *obj); void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip, u32 size, void *obj); diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c index e84341282374..a403718f0b5e 100644 --- a/drivers/infiniband/sw/rdmavt/pd.c +++ b/drivers/infiniband/sw/rdmavt/pd.c @@ -51,15 +51,13 @@ /** * rvt_alloc_pd - allocate a protection domain * @ibpd: PD - * @context: optional user context * @udata: optional user data * * Allocate and keep track of a PD. * * Return: 0 on success */ -int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct rvt_dev_info *dev = ib_to_rvt(ibdev); diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h index d0368a625e03..71ba76d72b1d 100644 --- a/drivers/infiniband/sw/rdmavt/pd.h +++ b/drivers/infiniband/sw/rdmavt/pd.h @@ -50,8 +50,7 @@ #include <rdma/rdma_vt.h> -int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); +int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); #endif /* DEF_RDMAVTPD_H */ diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index e8bba7e56c29..90ed99f4b026 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -957,8 +957,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, size_t sg_list_sz; struct ib_qp *ret = ERR_PTR(-ENOMEM); struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); - struct rvt_ucontext *ucontext = rdma_udata_to_drv_context( - udata, struct rvt_ucontext, ibucontext); void *priv = NULL; size_t sqsize; @@ -1131,8 +1129,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, } else { u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; - qp->ip = rvt_create_mmap_info(rdi, s, - &ucontext->ibucontext, + qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->r_rq.wq); if (!qp->ip) { ret = ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index 3090b0935714..9dd5e90faa42 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c @@ -78,8 +78,6 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd, struct ib_udata *udata) { struct 
rvt_dev_info *dev = ib_to_rvt(ibpd->device); - struct rvt_ucontext *ucontext = rdma_udata_to_drv_context( - udata, struct rvt_ucontext, ibucontext); struct rvt_srq *srq; u32 sz; struct ib_srq *ret; @@ -122,8 +120,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd, u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = - rvt_create_mmap_info(dev, s, &ucontext->ibucontext, - srq->rq.wq); + rvt_create_mmap_info(dev, s, udata, srq->rq.wq); if (!srq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wq; diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index a57276f2cb84..ad3090131126 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data) } int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, - int comp_vector, struct ib_ucontext *context, + int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp) { int err; @@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, return -ENOMEM; } - err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, + err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); if (err) { vfree(cq->queue->buf); @@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, } int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, - struct rxe_resize_cq_resp __user *uresp) + struct rxe_resize_cq_resp __user *uresp, + struct ib_udata *udata) { int err; err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe, - sizeof(struct rxe_cqe), - cq->queue->ip ? cq->queue->ip->context : NULL, + sizeof(struct rxe_cqe), udata, uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock); if (!err) cq->ibcq.cqe = cqe; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 3d8cef836f0d..da1f4601c9dd 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector); int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, - int comp_vector, struct ib_ucontext *context, + int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp); int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, - struct rxe_resize_cq_resp __user *uresp); + struct rxe_resize_cq_resp __user *uresp, + struct ib_udata *udata); int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); @@ -93,7 +94,7 @@ void rxe_mmap_release(struct kref *ref); struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size, - struct ib_ucontext *context, + struct ib_udata *udata, void *obj); int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); @@ -225,12 +226,13 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp); int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, - struct rxe_modify_srq_cmd *ucmd); + struct rxe_modify_srq_cmd *ucmd, + struct ib_udata *udata); void rxe_dealloc(struct ib_device *ib_dev); diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index d22431e3a908..e08e98ecd8f4 100644 --- 
a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -36,6 +36,7 @@ #include <linux/mm.h> #include <linux/errno.h> #include <asm/pgtable.h> +#include <rdma/uverbs_ioctl.h> #include "rxe.h" #include "rxe_loc.h" @@ -142,11 +143,14 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) */ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, - struct ib_ucontext *context, + struct ib_udata *udata, void *obj) { struct rxe_mmap_info *ip; + if (!udata) + return ERR_PTR(-EINVAL); + ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) return NULL; @@ -165,7 +169,11 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, INIT_LIST_HEAD(&ip->pending_mmaps); ip->info.size = size; - ip->context = context; + ip->context = + container_of( + udata, + struct uverbs_attr_bundle, + driver_udata)->context; ip->obj = obj; kref_init(&ip->ref); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 09ede70dc1e8..9a1bf51fb0a0 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -218,7 +218,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) { int err; @@ -254,7 +254,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, if (!qp->sq.queue) return -ENOMEM; - err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context, + err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata, qp->sq.queue->buf, qp->sq.queue->buf_size, &qp->sq.queue->ip); @@ -287,7 +287,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) { int err; @@ -308,7 +308,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, if (!qp->rq.queue) return -ENOMEM; - err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context, + err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata, qp->rq.queue->buf, qp->rq.queue->buf_size, &qp->rq.queue->ip); if (err) { @@ -344,8 +344,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_cq *rcq = to_rcq(init->recv_cq); struct rxe_cq *scq = to_rcq(init->send_cq); struct rxe_srq *srq = init->srq ? 
to_rsrq(init->srq) : NULL; - struct rxe_ucontext *ucontext = - rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc); rxe_add_ref(pd); rxe_add_ref(rcq); @@ -360,11 +358,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, rxe_qp_init_misc(rxe, qp, init); - err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp); + err = rxe_qp_init_req(rxe, qp, init, udata, uresp); if (err) goto err1; - err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp); + err = rxe_qp_init_resp(rxe, qp, init, udata, uresp); if (err) goto err2; diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index f84ab4469261..3d98dfbee6bd 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -38,7 +38,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_queue_buf *buf, size_t buf_size, struct rxe_mmap_info **ip_p) @@ -47,7 +47,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct rxe_mmap_info *ip = NULL; if (outbuf) { - ip = rxe_create_mmap_info(rxe, buf_size, context, buf); + ip = rxe_create_mmap_info(rxe, buf_size, udata, buf); if (!ip) goto err1; @@ -156,7 +156,7 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, unsigned int elem_size, - struct ib_ucontext *context, + struct ib_udata *udata, struct mminfo __user *outbuf, spinlock_t *producer_lock, spinlock_t *consumer_lock) @@ -170,7 +170,7 @@ int rxe_queue_resize(struct rxe_queue *q, if (!new_q) return -ENOMEM; - err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf, + err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf, new_q->buf_size, &new_q->ip); if (err) { vfree(new_q->buf); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 79ba4b320054..2771eb6b2c05 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -78,7 +78,7 @@ struct rxe_queue { int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_queue_buf *buf, size_t buf_size, struct rxe_mmap_info **ip_p); @@ -92,7 +92,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, unsigned int elem_size, - struct ib_ucontext *context, + struct ib_udata *udata, struct mminfo __user *outbuf, /* Protect producers while resizing queue */ spinlock_t *producer_lock, diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index c41a5fee81f7..873513b57b4b 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -100,7 +100,7 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, - struct ib_ucontext *context, + struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp) { int err; @@ -128,7 +128,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, srq->rq.queue = q; - err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf, + err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, udata, q->buf, q->buf_size, &q->ip); if (err) { vfree(q->buf); @@ -149,7 +149,8 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, - struct rxe_modify_srq_cmd *ucmd) + struct rxe_modify_srq_cmd *ucmd, + struct ib_udata *udata) { int err; struct rxe_queue *q = srq->rq.queue; @@ -164,10 +165,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, err = rxe_queue_resize(q, &attr->max_wr, rcv_wqe_size(srq->rq.max_sge), - srq->rq.queue->ip ? - srq->rq.queue->ip->context : - NULL, - mi, &srq->rq.producer_lock, + udata, mi, &srq->rq.producer_lock, &srq->rq.consumer_lock); if (err) goto err2; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 14102e75fe84..0538909059b1 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num, return 0; } -static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, - struct ib_udata *udata) +static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); @@ -305,8 +304,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, int err; struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); - struct rxe_ucontext *ucontext = - rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc); struct rxe_srq *srq; struct rxe_create_srq_resp __user *uresp = NULL; @@ -330,7 +327,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, rxe_add_ref(pd); srq->pd = pd; - err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp); + err = rxe_srq_from_init(rxe, srq, init, udata, uresp); if (err) goto err2; @@ -366,7 +363,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (err) goto err1; - err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd); + err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); if (err) goto err1; @@ -799,7 +796,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, static struct ib_cq *rxe_create_cq(struct ib_device *dev, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata) { int err; @@ -827,7 +823,7 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev, } err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, - context, uresp); + udata, uresp); if (err) goto err2; @@ -866,7 +862,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (err) goto err1; - err = rxe_cq_resize_queue(cq, cqe, uresp); + err = rxe_cq_resize_queue(cq, cqe, uresp, udata); if (err) goto err1; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 11242ca3c9fb..ff0a6c018cf6 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2394,8 +2394,7 @@ struct ib_device_ops { void (*dealloc_ucontext)(struct ib_ucontext *context); int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); - int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context, - struct ib_udata *udata); + int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); struct ib_ah *(*create_ah)(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, @@ 
-2421,7 +2420,6 @@ struct ib_device_ops { int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); struct ib_cq *(*create_cq)(struct ib_device *device, const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); @@ -2456,7 +2454,6 @@ struct ib_device_ops { int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, - struct ib_ucontext *ucontext, struct ib_udata *udata); int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); struct ib_flow *(*create_flow)(struct ib_qp *qp, -- 2.20.1
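
The conversion repeated in the qedr, usnic and pvrdma hunks above is always the same: drop the explicit ib_ucontext parameter and, where the driver still needs its private context on the user path, recover it from the udata with rdma_udata_to_drv_context(). The sketch below is illustrative only; the "foo" names are made up and not part of this series. As I read the helper, it resolves to the driver ucontext when the udata comes from a uverbs command and to NULL for in-kernel callers that pass a NULL udata, which is also why the hunks above switch their user/kernel tests from the context pointer to the udata pointer.

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

struct foo_ucontext {
	struct ib_ucontext ibucontext;
	u32 db_key;			/* illustrative driver state */
};

struct foo_pd {
	struct ib_pd ibpd;
	struct foo_ucontext *uctx;
};

/* New-style alloc_pd: no ib_ucontext argument; udata alone says whether
 * this is a user (udata != NULL) or kernel (udata == NULL) allocation. */
static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);
	struct foo_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct foo_ucontext,
					  ibucontext);

	if (udata)
		pd->uctx = uctx;	/* user path: context recovered from udata */
	else
		pd->uctx = NULL;	/* kernel path: no user context exists */

	return 0;
}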
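
The rvt_create_mmap_info() and rxe_create_mmap_info() hunks above stop taking an ib_ucontext and instead walk from the udata back to the enclosing uverbs_attr_bundle to find it. Condensed into a standalone helper (my name and wrapper, not something this patch adds), the derivation they open-code is roughly:

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

/*
 * Only valid for udata that originated from the uverbs command/ioctl
 * path, where the ib_udata is embedded in a struct uverbs_attr_bundle.
 * A kernel caller has no such bundle (its udata is NULL), which is why
 * both mmap-info helpers above now bail out early on !udata.
 */
static struct ib_ucontext *udata_to_ib_ucontext(struct ib_udata *udata)
{
	if (!udata)
		return NULL;

	return container_of(udata, struct uverbs_attr_bundle,
			    driver_udata)->context;
}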
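
A further consequence, visible in the pvrdma hunks above ("cq->is_kernel = !udata", "pd->privileged = !udata"): with the ucontext argument gone, the udata pointer itself becomes the single user-vs-kernel discriminator. A minimal sketch of a create_cq under the new ib_device_ops prototype, again with made-up "foo" names and a hypothetical response struct, assuming kernel-side callers keep passing a NULL udata as elsewhere in this series:

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct foo_cq {
	struct ib_cq ibcq;
	bool is_kernel;
};

struct foo_create_cq_resp {
	__u64 cq_handle;		/* illustrative ABI field */
};

static struct ib_cq *foo_create_cq(struct ib_device *ibdev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_udata *udata)
{
	struct foo_cq *cq;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = attr->cqe;
	cq->is_kernel = !udata;		/* one test replaces the old context check */

	if (udata) {
		struct foo_create_cq_resp resp = { .cq_handle = 0 };

		/* user path: hand the CQ details back to userspace */
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			kfree(cq);
			return ERR_PTR(-EFAULT);
		}
	}

	return &cq->ibcq;
}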