From: Leon Romanovsky <leonro@xxxxxxxxxxxx>

Following the PD conversion patch, do the same for ucontext allocations:
embed struct ib_ucontext in the driver's private context structure and let
the IB/core allocate and free it, so that ->alloc_ucontext() now returns an
int and ->dealloc_ucontext() can no longer fail.

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
---
 drivers/infiniband/core/device.c             |  1 +
 drivers/infiniband/core/rdma_core.c          |  9 +---
 drivers/infiniband/core/uverbs_cmd.c         | 24 +++++----
 drivers/infiniband/hw/bnxt_re/ib_verbs.c     | 32 ++++--------
 drivers/infiniband/hw/bnxt_re/ib_verbs.h     |  7 ++-
 drivers/infiniband/hw/bnxt_re/main.c         |  1 +
 drivers/infiniband/hw/cxgb3/iwch_provider.c  | 17 +++----
 drivers/infiniband/hw/cxgb4/provider.c       | 26 ++++------
 drivers/infiniband/hw/hns/hns_roce_main.c    | 26 ++++------
 drivers/infiniband/hw/i40iw/i40iw_verbs.c    | 50 ++++++-------------
 drivers/infiniband/hw/mlx4/main.c            | 30 ++++-------
 drivers/infiniband/hw/mlx5/main.c            | 35 ++++++-------
 drivers/infiniband/hw/mthca/mthca_provider.c | 39 ++++++---------
 drivers/infiniband/hw/nes/nes_verbs.c        | 34 +++++--------
 drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  1 +
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  | 38 ++++++--------
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h  |  5 +-
 drivers/infiniband/hw/qedr/main.c            |  1 +
 drivers/infiniband/hw/qedr/verbs.c           | 34 ++++---------
 drivers/infiniband/hw/qedr/verbs.h           |  4 +-
 drivers/infiniband/hw/usnic/usnic_ib_main.c  |  1 +
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 18 +++----
 drivers/infiniband/hw/usnic/usnic_ib_verbs.h |  5 +-
 .../infiniband/hw/vmw_pvrdma/pvrdma_main.c   |  1 +
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.c  | 49 ++++++------------
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.h  |  5 +-
 drivers/infiniband/sw/rdmavt/vt.c            | 22 +++-----
 drivers/infiniband/sw/rxe/rxe_pool.c         |  1 +
 drivers/infiniband/sw/rxe/rxe_verbs.c        | 14 +++---
 drivers/infiniband/sw/rxe/rxe_verbs.h        |  2 +-
 include/rdma/ib_verbs.h                      |  7 +--
 31 files changed, 199 insertions(+), 340 deletions(-)

diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 39af07cc20af..868d656f3dda 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1419,6 +1419,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, unmap_fmr);
 	SET_OBJ_SIZE(dev_ops, ib_pd);
+	SET_OBJ_SIZE(dev_ops, ib_ucontext);
 }
 EXPORT_SYMBOL(ib_set_device_ops);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index a260d2f8e0b7..64708b45a486 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -812,7 +812,6 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 {
 	struct ib_ucontext *ucontext = ufile->ucontext;
 	struct ib_device *ib_dev = ucontext->device;
-	int ret;
 	/*
 	 * If we are closing the FD then the user mmap VMAs must have
@@ -830,12 +829,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 	rdma_restrack_del(&ucontext->res);
-	/*
-	 * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
-	 * the error return.
-	 */
-	ret = ib_dev->ops.dealloc_ucontext(ucontext);
-	WARN_ON(ret);
+	ib_dev->ops.dealloc_ucontext(ucontext);
+	kfree(ucontext);
 	ufile->ucontext = NULL;
 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ac143f22df0..4f70b34dc3fa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -224,12 +224,13 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	if (ret)
 		goto err;
-	ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
-	if (IS_ERR(ucontext)) {
-		ret = PTR_ERR(ucontext);
+	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+	if (!ucontext) {
+		ret = -ENOMEM;
 		goto err_alloc;
 	}
+	ucontext->res.type = RDMA_RESTRACK_CTX;
 	ucontext->device = ib_dev;
 	ucontext->cg_obj = cg_obj;
 	/* ufile is required when some objects are released */
@@ -240,10 +241,6 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	mutex_init(&ucontext->per_mm_list_lock);
 	INIT_LIST_HEAD(&ucontext->per_mm_list);
-	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
-		ucontext->invalidate_range = NULL;
-
-	resp.num_comp_vectors = file->device->num_comp_vectors;
 	ret = get_unused_fd_flags(O_CLOEXEC);
 	if (ret < 0)
@@ -256,15 +253,22 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 		goto err_fd;
 	}
+	resp.num_comp_vectors = file->device->num_comp_vectors;
+
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
 		goto err_file;
-	fd_install(resp.async_fd, filp);
+	ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
+	if (ret)
+		goto err_file;
+	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+		ucontext->invalidate_range = NULL;
-	ucontext->res.type = RDMA_RESTRACK_CTX;
 	rdma_restrack_uadd(&ucontext->res);
+	fd_install(resp.async_fd, filp);
+
 	/*
 	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
 	 * only after all writes to setup the ucontext have completed
@@ -283,7 +287,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	put_unused_fd(resp.async_fd);
 err_free:
-	ib_dev->ops.dealloc_ucontext(ucontext);
+	kfree(ucontext);
 err_alloc:
 	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bff9320a968e..01c045d94890 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3670,13 +3670,14 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	return ERR_PTR(rc);
 }
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
-					   struct ib_udata *udata)
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ctx->device;
+	struct bnxt_re_ucontext *uctx =
+		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 	struct bnxt_re_uctx_resp resp;
-	struct bnxt_re_ucontext *uctx;
 	u32 chip_met_rev_num = 0;
 	int rc;
@@ -3686,13 +3687,9 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
 		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
			BNXT_RE_ABI_VERSION);
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 	}
-	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
-	if (!uctx)
-		return ERR_PTR(-ENOMEM);
-
 	uctx->rdev = rdev;
 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
@@ -3726,23 +3723,21 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 		goto cfail;
 	}
-	return &uctx->ib_uctx;
+	return 0;
 cfail:
 	free_page((unsigned long)uctx->shpg);
 	uctx->shpg = NULL;
 fail:
-	kfree(uctx);
-	return ERR_PTR(rc);
+	return rc;
 }
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 {
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
 	struct bnxt_re_dev *rdev = uctx->rdev;
-	int rc = 0;
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
@@ -3751,17 +3746,10 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &uctx->dpi);
-		if (rc)
-			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
-			/* Don't fail, continue*/
+		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
 		uctx->dpi.dbr = NULL;
 	}
-
-	kfree(uctx);
-	return 0;
 }
 /* Helper function to mmap the virtual memory from user app */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index c7cca803cfa3..e45465ed4eee 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -135,8 +135,8 @@ struct bnxt_re_mw {
 };
 struct bnxt_re_ucontext {
+	struct ib_ucontext ib_uctx;
 	struct bnxt_re_dev *rdev;
-	struct ib_ucontext ib_uctx;
 	struct bnxt_qplib_dpi dpi;
 	void *shpg;
 	spinlock_t sh_lock; /* protect shpg */
@@ -215,9 +215,8 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
-					   struct ib_udata *udata);
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 0a89ef6e5754..2bd24ac45ee4 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -638,6 +638,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.reg_user_mr = bnxt_re_reg_user_mr,
 	.req_notify_cq = bnxt_re_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
 };
 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 80dff6804e48..edab6ed77b5e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,7 +61,7 @@
 #include <rdma/cxgb3-abi.h>
 #include "common.h"
-static int iwch_dealloc_ucontext(struct ib_ucontext *context)
+static void iwch_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct iwch_dev *rhp = to_iwch_dev(context->device);
 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
@@ -71,24 +71,20 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-	kfree(ucontext);
-	return 0;
 }
-static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
-					       struct ib_udata *udata)
+static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
+			       struct ib_udata *udata)
 {
-	struct iwch_ucontext *context;
+	struct ib_device *ibdev = ucontext->device;
+	struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 	pr_debug("%s ibdev %p\n", __func__, ibdev);
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
 	cxio_init_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
-	return &context->ibucontext;
+	return 0;
 }
 static int iwch_destroy_cq(struct ib_cq *ib_cq)
@@ -1340,6 +1336,7 @@ static const struct ib_device_ops iwch_dev_ops = {
 	.req_notify_cq = iwch_arm_cq,
 	.resize_cq = iwch_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
 };
 int iwch_register_device(struct iwch_dev *dev)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 81fcffb597ab..507c54572cc9 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,7 +58,7 @@ static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
 	struct c4iw_dev *rhp;
@@ -70,26 +70,19 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
-	kfree(ucontext);
-	return 0;
 }
-static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
-					       struct ib_udata *udata)
+static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
+			       struct ib_udata *udata)
 {
-	struct c4iw_ucontext *context;
+	struct ib_device *ibdev = ucontext->device;
+	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
 	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
 	struct c4iw_alloc_ucontext_resp uresp;
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
 	pr_debug("ibdev %p\n", ibdev);
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
@@ -101,7 +94,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
 		if (!mm) {
 			ret = -ENOMEM;
-			goto err_free;
+			goto err;
 		}
 		uresp.status_page_size = PAGE_SIZE;
@@ -121,13 +114,11 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 		mm->len = PAGE_SIZE;
 		insert_mmap(context, mm);
 	}
-	return &context->ibucontext;
+	return 0;
 err_mm:
 	kfree(mm);
-err_free:
-	kfree(context);
 err:
-	return ERR_PTR(ret);
+	return ret;
 }
 static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -555,6 +546,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.reg_user_mr = c4iw_reg_user_mr,
 	.req_notify_cq = c4iw_arm_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
 };
 void c4iw_register_device(struct work_struct *work)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index ccf10622586c..90cdb4bcd632 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -330,23 +330,19 @@ static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
 	return 0;
 }
-static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
-						   struct ib_udata *udata)
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+				   struct ib_udata *udata)
 {
 	int ret = 0;
-	struct hns_roce_ucontext *context;
+	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
 	struct hns_roce_ib_alloc_ucontext_resp resp = {};
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
 	if (!hr_dev->active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 	resp.qp_tab_size = hr_dev->caps.num_qps;
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
 	if (ret)
 		goto error_fail_uar_alloc;
@@ -360,25 +356,20 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
 	if (ret)
 		goto error_fail_copy_to_udata;
-	return &context->ibucontext;
+	return 0;
 error_fail_copy_to_udata:
 	hns_roce_uar_free(hr_dev, &context->uar);
 error_fail_uar_alloc:
-	kfree(context);
-
-	return ERR_PTR(ret);
+	return ret;
 }
-static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
-	kfree(context);
-
-	return 0;
 }
 static int hns_roce_mmap(struct ib_ucontext *context,
@@ -473,6 +464,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.query_port = hns_roce_query_port,
 	.reg_user_mr = hns_roce_reg_user_mr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
 };
 static const struct ib_device_ops hns_roce_dev_mr_ops = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index d5fb2b927587..e30c4b42cff8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -120,78 +120,55 @@ static int i40iw_query_port(struct ib_device *ibdev,
 /**
  * i40iw_alloc_ucontext - Allocate the user context data structure
- * @ibdev: device pointer from stack
+ * @uctx: Uverbs context pointer from stack
  * @udata: user data
  *
  * This keeps track of all objects associated with a particular
  * user-mode client.
  */
-static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
+static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
+				struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
 	struct i40iw_alloc_ucontext_req req;
-	struct i40iw_alloc_ucontext_resp uresp;
-	struct i40iw_ucontext *ucontext;
+	struct i40iw_alloc_ucontext_resp uresp = {};
+	struct i40iw_ucontext *ucontext = to_ucontext(uctx);
 	if (ib_copy_from_udata(&req, udata, sizeof(req)))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
 		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
-	memset(&uresp, 0, sizeof(uresp));
 	uresp.max_qps = iwdev->max_qp;
 	uresp.max_pds = iwdev->max_pd;
 	uresp.wq_size = iwdev->max_qp_wr * 2;
 	uresp.kernel_ver = req.userspace_ver;
-	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
-	if (!ucontext)
-		return ERR_PTR(-ENOMEM);
-
 	ucontext->iwdev = iwdev;
 	ucontext->abi_ver = req.userspace_ver;
-	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
-		kfree(ucontext);
-		return ERR_PTR(-EFAULT);
-	}
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+		return -EFAULT;
 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
-	return &ucontext->ibucontext;
+	return 0;
 }
 /**
  * i40iw_dealloc_ucontext - deallocate the user context data structure
  * @context: user context created during alloc
  */
-static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct i40iw_ucontext *ucontext = to_ucontext(context);
-	unsigned long flags;
-
-	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->cq_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->qp_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
-	kfree(ucontext);
-	return 0;
+	return;
 }
 /**
@@ -2739,6 +2716,7 @@ static const struct ib_device_ops i40iw_dev_ops = {
 	.reg_user_mr = i40iw_reg_user_mr,
 	.req_notify_cq = i40iw_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
 };
 /**
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c0f6aea7ed7c..733f7bbd5901 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1076,17 +1076,18 @@ static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
 	return err;
 }
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
-						  struct ib_udata *udata)
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
+				  struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	struct mlx4_ib_ucontext *context;
+	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;
 	if (!dev->ib_active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
 		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
@@ -1100,15 +1101,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 		resp.cqe_size = dev->dev->caps.cqe_size;
 	}
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
@@ -1123,21 +1118,17 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
-	return &context->ibucontext;
+	return err;
 }
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
-	kfree(context);
-
-	return 0;
 }
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
@@ -2570,6 +2561,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
 	.resize_cq = mlx4_ib_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
 };
 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index efead59ca498..a33d4375ef15 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1686,14 +1686,15 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
 	mlx5_ib_disable_lb(dev, true, false);
 }
-static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
-						  struct ib_udata *udata)
+static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
+				  struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
 	struct mlx5_ib_alloc_ucontext_resp resp = {};
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_ib_ucontext *context;
+	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
 	struct mlx5_bfreg_info *bfregi;
 	int ver;
 	int err;
@@ -1703,29 +1704,29 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	bool lib_uar_4k;
 	if (!dev->ib_active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
 		ver = 0;
 	else if (udata->inlen >= min_req_v2)
 		ver = 2;
 	else
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
 	if (err)
-		return ERR_PTR(err);
+		return err;
 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
@@ -1758,10 +1759,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
 	}
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
 	bfregi = &context->bfregi;
@@ -1896,7 +1893,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
					 1, &dev->roce[port].tx_port_affinity));
 	}
-	return &context->ibucontext;
+	return 0;
 out_mdev:
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -1914,12 +1911,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	kfree(bfregi->count);
 out_ctx:
-	kfree(context);
-
-	return ERR_PTR(err);
+	return err;
 }
-static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
@@ -1939,9 +1934,6 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	deallocate_uars(dev, context);
 	kfree(bfregi->sys_pages);
 	kfree(bfregi->count);
-	kfree(context);
-
-	return 0;
 }
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -5925,6 +5917,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
 	.resize_cq = mlx5_ib_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };
 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 516c8cf9c0fd..9b7afbcbde51 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -300,17 +300,16 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	return err;
 }
-static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
+static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
+				struct ib_udata *udata)
 {
-	struct mthca_alloc_ucontext_resp uresp;
-	struct mthca_ucontext *context;
+	struct ib_device *ibdev = uctx->device;
+	struct mthca_alloc_ucontext_resp uresp = {};
+	struct mthca_ucontext *context = to_mucontext(uctx);
 	int err;
 	if (!(to_mdev(ibdev)->active))
-		return ERR_PTR(-EAGAIN);
-
-	memset(&uresp, 0, sizeof uresp);
+		return -EAGAIN;
 	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
 	if (mthca_is_memfree(to_mdev(ibdev)))
@@ -318,44 +317,33 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
 	else
 		uresp.uarc_size = 0;
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
 	if (IS_ERR(context->db_tab)) {
 		err = PTR_ERR(context->db_tab);
 		mthca_uar_free(to_mdev(ibdev), &context->uar);
-		kfree(context);
-		return ERR_PTR(err);
+		return err;
 	}
-	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
 		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
 		mthca_uar_free(to_mdev(ibdev), &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 	context->reg_mr_warned = 0;
-	return &context->ibucontext;
+	return 0;
 }
-static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+static void mthca_dealloc_ucontext(struct ib_ucontext *context)
 {
 	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
 	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
-	kfree(to_mucontext(context));
-
-	return 0;
 }
 static int mthca_mmap_uar(struct ib_ucontext *context,
@@ -1215,6 +1203,7 @@ static const struct ib_device_ops mthca_dev_ops = {
 	.reg_user_mr = mthca_reg_user_mr,
 	.resize_cq = mthca_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
 };
 static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9239d39e140e..9345ae3b028a 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -528,27 +528,27 @@ static int nes_query_gid(struct ib_device *ibdev, u8 port,
  * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
  * of all objects associated with a particular user-mode client.
  */
-static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
-		struct ib_udata *udata)
+static int nes_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	struct nes_alloc_ucontext_req req;
-	struct nes_alloc_ucontext_resp urespi = {};
-	struct nes_ucontext *nes_ucontext;
+	struct nes_alloc_ucontext_resp uresp = {};
+	struct nes_ucontext *nes_ucontext = to_nesucontext(uctx);
 	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
 	if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
 		printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 	if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
 		printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
			req.userspace_ver, NES_ABI_USERSPACE_VER);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
@@ -558,10 +558,6 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
 	uresp.virtwq = nesadapter->virtwq;
 	uresp.kernel_ver = NES_ABI_KERNEL_VER;
-	nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
-	if (!nes_ucontext)
-		return ERR_PTR(-ENOMEM);
-
 	nes_ucontext->nesdev = nesdev;
 	nes_ucontext->mmap_wq_offset = uresp.max_pds;
 	nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
@@ -569,29 +565,22 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
			PAGE_SIZE;
-	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
-		kfree(nes_ucontext);
-		return ERR_PTR(-EFAULT);
-	}
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+		return -EFAULT;
 	INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
 	INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
-	return &nes_ucontext->ibucontext;
+	return 0;
 }
-
 /**
  * nes_dealloc_ucontext
  */
-static int nes_dealloc_ucontext(struct ib_ucontext *context)
+static void nes_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct nes_ucontext *nes_ucontext = to_nesucontext(context);
-
-	kfree(nes_ucontext);
-	return 0;
+	return;
 }
-
 /**
  * nes_mmap
 */
@@ -3641,6 +3630,7 @@ static const struct ib_device_ops nes_dev_ops = {
 	.reg_user_mr = nes_reg_user_mr,
 	.req_notify_cq = nes_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, nes_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, nes_ucontext, ibucontext),
 };
 /**
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 0de83c92691f..b9e10d55a58e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -180,6 +180,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
 	.req_notify_cq = ocrdma_arm_cq,
 	.resize_cq = ocrdma_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
 };
 static const struct ib_device_ops ocrdma_dev_srq_ops = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index ed5da67b693d..b4e1777c2c97 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -440,7 +440,7 @@ static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
 	return status;
 }
-static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@@ -451,8 +451,7 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 	}
 	kfree(uctx->cntxt_pd);
 	uctx->cntxt_pd = NULL;
-	(void)_ocrdma_dealloc_pd(dev, pd);
-	return 0;
+	_ocrdma_dealloc_pd(dev, pd);
 }
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -476,33 +475,28 @@ static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
 	mutex_unlock(&uctx->mm_list_lock);
 }
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	int status;
-	struct ocrdma_ucontext *ctx;
-	struct ocrdma_alloc_ucontext_resp resp;
+	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+	struct ocrdma_alloc_ucontext_resp resp = {};
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
 	if (!udata)
-		return ERR_PTR(-EFAULT);
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -EFAULT;
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
-	if (!ctx->ah_tbl.va) {
-		kfree(ctx);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!ctx->ah_tbl.va)
+		return -ENOMEM;
+
 	ctx->ah_tbl.len = map_len;
-	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
@@ -524,7 +518,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
 	if (status)
 		goto cpy_err;
-	return &ctx->ibucontext;
+	return 0;
 cpy_err:
 	ocrdma_dealloc_ucontext_pd(ctx);
@@ -533,19 +527,17 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 map_err:
 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
-	kfree(ctx);
-	return ERR_PTR(status);
+	return status;
 }
-int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
-	int status;
 	struct ocrdma_mm *mm, *tmp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
-	status = ocrdma_dealloc_ucontext_pd(uctx);
+	ocrdma_dealloc_ucontext_pd(uctx);
 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -555,8 +547,6 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 		list_del(&mm->entry);
 		kfree(mm);
 	}
-	kfree(uctx);
-	return status;
 }
 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 1fd66721c930..4c04ab40798e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -64,9 +64,8 @@ void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
 struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
 int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
-					  struct ib_udata *);
-int ocrdma_dealloc_ucontext(struct ib_ucontext *);
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
 int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 44ce4989dcef..996d9ecd93e0 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -240,6 +240,7 @@ static const struct ib_device_ops qedr_dev_ops = {
 	.req_notify_cq = qedr_arm_cq,
 	.resize_cq = qedr_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
 };
 static int qedr_register_device(struct qedr_dev *dev)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a613ebde322f..0bebbdd3d352 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -316,28 +316,24 @@ static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
 	return found;
 }
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
-					struct ib_udata *udata)
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	int rc;
-	struct qedr_ucontext *ctx;
-	struct qedr_alloc_ucontext_resp uresp;
+	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
+	struct qedr_alloc_ucontext_resp uresp = {};
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
 	struct qed_rdma_add_user_out_params oparams;
 	if (!udata)
-		return ERR_PTR(-EFAULT);
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -EFAULT;
 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
 	if (rc) {
 		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
-		goto err;
+		return rc;
 	}
 	ctx->dpi = oparams.dpi;
@@ -347,8 +343,6 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
-	memset(&uresp, 0, sizeof(uresp));
-
 	uresp.dpm_enabled = dev->user_dpm_enabled;
 	uresp.wids_enabled = 1;
 	uresp.wid_count = oparams.wid_count;
@@ -364,28 +358,23 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
-		goto err;
+		return rc;
 	ctx->dev = dev;
 	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
 	if (rc)
-		goto err;
+		return rc;
 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
-	return &ctx->ibucontext;
-
-err:
-	kfree(ctx);
-	return ERR_PTR(rc);
+	return 0;
 }
-int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
 	struct qedr_mm *mm, *tmp;
-	int status = 0;
 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
@@ -398,9 +387,6 @@ int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
 		list_del(&mm->entry);
 		kfree(mm);
 	}
-
-	kfree(uctx);
-	return status;
 }
 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 97a6ff3f9afb..f0c05f4771ac 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -43,8 +43,8 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
 int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
-int qedr_dealloc_ucontext(struct ib_ucontext *);
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
 int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
 int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 256ad2f236c8..8e69affd7157 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -353,6 +353,7 @@ static const struct ib_device_ops usnic_dev_ops = {
 	.query_qp = usnic_ib_query_qp,
 	.reg_user_mr = usnic_ib_reg_mr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
 };
 /* Start of PF discovery section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 0ced89b51448..38a3ce62b6d5 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -662,37 +662,31 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
-					    struct ib_udata *udata)
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct usnic_ib_ucontext *context;
+	struct ib_device *ibdev = uctx->device;
+	struct usnic_ib_ucontext *context = to_ucontext(uctx);
 	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
 	usnic_dbg("\n");
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	INIT_LIST_HEAD(&context->qp_grp_list);
 	mutex_lock(&us_ibdev->usdev_lock);
 	list_add_tail(&context->link, &us_ibdev->ctx_list);
 	mutex_unlock(&us_ibdev->usdev_lock);
-	return &context->ibucontext;
+	return 0;
 }
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
 	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
 	usnic_dbg("\n");
 	mutex_lock(&us_ibdev->usdev_lock);
-	BUG_ON(!list_empty(&context->qp_grp_list));
+	WARN_ON_ONCE(!list_empty(&context->qp_grp_list));
 	list_del(&context->link);
 	mutex_unlock(&us_ibdev->usdev_lock);
-	kfree(context);
-	return 0;
 }
 int usnic_ib_mmap(struct ib_ucontext *context,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 44a9d2f82bf5..f5893dc7d441 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -69,9 +69,8 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int access_flags,
				struct ib_udata *udata);
 int usnic_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
-					    struct ib_udata *udata);
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
 int usnic_ib_mmap(struct ib_ucontext *context,
		  struct vm_area_struct *vma);
 #endif /* !USNIC_IB_VERBS_H */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 47e653d2495c..6d8b3e0de57a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -196,6 +196,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
 	.reg_user_mr = pvrdma_reg_user_mr,
 	.req_notify_cq = pvrdma_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
 };
 static const struct ib_device_ops pvrdma_dev_srq_ops = {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index f44220f72e05..eb4409417e08 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,41 +306,33 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
 /**
  * pvrdma_alloc_ucontext - allocate ucontext
- * @ibdev: the IB device
+ * @uctx: the uverbs context
  * @udata: user data
 *
- * @return: the ib_ucontext pointer on success, otherwise errno.
+ * @return: zero on success, otherwise errno.
  */
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct pvrdma_dev *vdev = to_vdev(ibdev);
-	struct pvrdma_ucontext *context;
-	union pvrdma_cmd_req req;
-	union pvrdma_cmd_resp rsp;
+	struct pvrdma_ucontext *context = to_vucontext(uctx);
+	union pvrdma_cmd_req req = {};
+	union pvrdma_cmd_resp rsp = {};
 	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
 	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-	struct pvrdma_alloc_ucontext_resp uresp = {0};
+	struct pvrdma_alloc_ucontext_resp uresp = {};
 	int ret;
 	void *ptr;
 	if (!vdev->ib_active)
-		return ERR_PTR(-EAGAIN);
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
+		return -EAGAIN;
 	context->dev = vdev;
 	ret = pvrdma_uar_alloc(vdev, &context->uar);
-	if (ret) {
-		kfree(context);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ret)
+		return -ENOMEM;
 	/* get ctx_handle from host */
-	memset(cmd, 0, sizeof(*cmd));
-
 	if (vdev->dsr_version < PVRDMA_PPN64_VERSION)
		cmd->pfn = context->uar.pfn;
	else
@@ -351,7 +343,6 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	if (ret < 0) {
 		dev_warn(&vdev->pdev->dev,
			 "could not create ucontext, error: %d\n", ret);
-		ptr = ERR_PTR(ret);
 		goto err;
 	}
@@ -362,33 +353,28 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (ret) {
 		pvrdma_uar_free(vdev, &context->uar);
-		context->ibucontext.device = ibdev;
 		pvrdma_dealloc_ucontext(&context->ibucontext);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
-	return &context->ibucontext;
+	return 0;
 err:
 	pvrdma_uar_free(vdev, &context->uar);
-	kfree(context);
-	return ptr;
+	return ret;
 }
 /**
  * pvrdma_dealloc_ucontext - deallocate ucontext
  * @ibcontext: the ucontext
- *
- * @return: 0 on success, otherwise errno.
  */
-int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
-	union pvrdma_cmd_req req;
+	union pvrdma_cmd_req req = {};
 	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
 	int ret;
-	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
 	cmd->ctx_handle = context->ctx_handle;
@@ -399,9 +385,6 @@ int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	/* Free the UAR even if the device command failed */
 	pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
-	kfree(context);
-
-	return ret;
 }
 /**
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index ed91baad1ffa..607aa131d67c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -396,9 +396,8 @@ int pvrdma_modify_device(struct ib_device *ibdev, int mask,
 int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props);
 int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata);
-int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
 int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
		    struct ib_udata *udata);
 void pvrdma_dealloc_pd(struct ib_pd *ibpd);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index a19832c73d5a..baede5bfcd82 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -296,28 +296,21 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
 /**
  * rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Verbs IB dev
+ * @uctx: Verbs context
  * @udata: User data allocated
 */
-static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
-					      struct ib_udata *udata)
+static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct rvt_ucontext *context;
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-	return &context->ibucontext;
+	return 0;
 }
 /**
- *rvt_dealloc_ucontext - Free a user context
- *@context - Free this
+ * rvt_dealloc_ucontext - Free a user context
+ * @context - Free this
 */
-static int rvt_dealloc_ucontext(struct ib_ucontext *context)
+static void rvt_dealloc_ucontext(struct ib_ucontext *context)
 {
-	kfree(to_iucontext(context));
-	return 0;
+	return;
 }
 static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -437,6 +430,7 @@ static const struct ib_device_ops rvt_dev_ops = {
 	.resize_cq = rvt_resize_cq,
 	.unmap_fmr = rvt_unmap_fmr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
 };
 static noinline int check_support(struct rvt_dev_info *rdi, int verb)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index cd3f14629ba8..a9e744b8d673 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -42,6 +42,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_UC] = {
 		.name = "rxe-uc",
 		.size = sizeof(struct rxe_ucontext),
+		.flags = RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_PD] = {
 		.name = "rxe-pd",
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 051c3930e808..c810ed4235b1 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -154,22 +154,19 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
 	return rxe_link_layer(rxe, port_num);
 }
-static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
-					      struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_ucontext *uc;
+	struct rxe_dev *rxe = to_rdev(uctx->device);
+	struct rxe_ucontext *uc = to_ruc(uctx);
-	uc = rxe_alloc(&rxe->uc_pool);
-	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
 }
-static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 {
 	struct rxe_ucontext *uc = to_ruc(ibuc);
 	rxe_drop_ref(uc);
-	return 0;
 }
 static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
@@ -1181,6 +1178,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.req_notify_cq = rxe_req_notify_cq,
 	.resize_cq = rxe_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 };
 int rxe_register_device(struct rxe_dev *rxe)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 70839d3f55d9..eab5dd9163c8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -61,8 +61,8 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
 }
 struct rxe_ucontext {
+	struct ib_ucontext ibuc;
 	struct rxe_pool_entry pelem;
-	struct ib_ucontext ibuc;
 };
 struct rxe_pd {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 34fb5c8b7e57..f042c2d00d1a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2380,9 +2380,9 @@ struct ib_device_ops {
 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
 	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
			  u16 *pkey);
-	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
-					      struct ib_udata *udata);
-	int (*dealloc_ucontext)(struct ib_ucontext *context);
+	int (*alloc_ucontext)(struct ib_ucontext *context,
+			      struct ib_udata *udata);
+	void (*dealloc_ucontext)(struct ib_ucontext *context);
 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
 	int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
@@ -2531,6 +2531,7 @@ struct ib_device_ops {
			     struct rdma_restrack_entry *entry);
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
+	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 };
 struct rdma_restrack_root;
-- 
2.19.1
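
For readers following the series, here is a minimal sketch of the driver-facing
contract after this conversion, using a made-up "foo" driver; the struct name,
fields and ops table are illustrative only and are not part of this patch:

/* Hypothetical "foo" driver, shown only to illustrate the new contract. */
struct foo_ucontext {
	struct ib_ucontext ibucontext;	/* embedded core object */
	void *db_page;			/* illustrative private state */
};

static int foo_alloc_ucontext(struct ib_ucontext *uctx,
			      struct ib_udata *udata)
{
	/* The core allocates the whole foo_ucontext (rdma_zalloc_drv_obj()
	 * sized via INIT_RDMA_OBJ_SIZE); the driver only fills in its
	 * private part and returns a plain errno instead of ERR_PTR().
	 */
	struct foo_ucontext *ctx =
		container_of(uctx, struct foo_ucontext, ibucontext);

	ctx->db_page = NULL;
	return 0;
}

static void foo_dealloc_ucontext(struct ib_ucontext *uctx)
{
	struct foo_ucontext *ctx =
		container_of(uctx, struct foo_ucontext, ibucontext);

	/* Tear down private state only; the core kfree()s the object and
	 * dealloc can no longer fail.
	 */
	(void)ctx;
}

static const struct ib_device_ops foo_dev_ops = {
	.alloc_ucontext = foo_alloc_ucontext,
	.dealloc_ucontext = foo_dealloc_ucontext,
	INIT_RDMA_OBJ_SIZE(ib_ucontext, foo_ucontext, ibucontext),
};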