From: Leon Romanovsky <leonro@xxxxxxxxxxxx>

Allocating the PD in IB/core allows us to massively simplify drivers and
their error flows in their .alloc_pd() paths. The changes in .alloc_pd()
go hand in hand with the relevant updates in .dealloc_pd(). We use this
opportunity to convert .dealloc_pd() so that it cannot fail, as was
suggested a long time ago.

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
---
Note: a minimal sketch of the resulting driver-side pattern is appended
below the patch.

 drivers/infiniband/core/device.c             |  2 +
 drivers/infiniband/core/uverbs_cmd.c         | 16 ++-
 drivers/infiniband/core/uverbs_std_types.c   |  2 +-
 drivers/infiniband/core/verbs.c              | 27 +++---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c     | 37 +++-----
 drivers/infiniband/hw/bnxt_re/ib_verbs.h     |  9 +-
 drivers/infiniband/hw/bnxt_re/main.c         |  1 +
 drivers/infiniband/hw/cxgb3/iwch_provider.c  | 25 ++---
 drivers/infiniband/hw/cxgb4/provider.c       | 25 ++---
 drivers/infiniband/hw/hns/hns_roce_device.h  |  7 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c   | 27 +++---
 drivers/infiniband/hw/hns/hns_roce_main.c    |  1 +
 drivers/infiniband/hw/hns/hns_roce_pd.c      | 25 ++---
 drivers/infiniband/hw/i40iw/i40iw_utils.c    |  1 -
 drivers/infiniband/hw/i40iw/i40iw_verbs.c    | 32 +++----
 drivers/infiniband/hw/mlx4/main.c            | 36 +++-----
 drivers/infiniband/hw/mlx5/main.c            | 48 +++++-----
 drivers/infiniband/hw/mthca/mthca_provider.c | 29 ++----
 drivers/infiniband/hw/nes/nes_verbs.c        | 32 +++----
 drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  1 +
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  | 92 ++++++++-----------
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h  |  6 +-
 drivers/infiniband/hw/qedr/main.c            |  1 +
 drivers/infiniband/hw/qedr/verbs.c           | 34 ++-----
 drivers/infiniband/hw/qedr/verbs.h           |  6 +-
 drivers/infiniband/hw/usnic/usnic_ib_main.c  |  1 +
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 26 ++----
 drivers/infiniband/hw/usnic/usnic_ib_verbs.h |  7 +-
 .../infiniband/hw/vmw_pvrdma/pvrdma_main.c   |  1 +
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.c  | 43 +++------
 .../infiniband/hw/vmw_pvrdma/pvrdma_verbs.h  |  7 +-
 drivers/infiniband/sw/rdmavt/pd.c            | 29 ++----
 drivers/infiniband/sw/rdmavt/pd.h            |  7 +-
 drivers/infiniband/sw/rdmavt/vt.c            |  1 +
 drivers/infiniband/sw/rxe/rxe.c              |  8 --
 drivers/infiniband/sw/rxe/rxe_pool.c         |  4 -
 drivers/infiniband/sw/rxe/rxe_pool.h         |  1 -
 drivers/infiniband/sw/rxe/rxe_qp.c           |  4 -
 drivers/infiniband/sw/rxe/rxe_verbs.c        | 34 ++-----
 drivers/infiniband/sw/rxe/rxe_verbs.h        |  2 -
 include/rdma/ib_verbs.h                      |  9 +-
 41 files changed, 271 insertions(+), 435 deletions(-)

diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index c332398376fc..e380a2663f31 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1312,6 +1312,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, set_vf_link_state);
 	SET_DEVICE_OP(dev_ops, unmap_fmr);
 	SET_DEVICE_OP(dev_ops, fill_res_entry);
+
+	SET_OBJ_SIZE(dev_ops, ib_pd);
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4ac97a61d567..882c7985367f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -407,9 +407,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
-	if (IS_ERR(pd)) {
-		ret = PTR_ERR(pd);
+	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
+	if (!pd) {
+		ret = -ENOMEM;
 		goto err;
 	}
 
@@ -417,11 +417,15 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	pd->uobject = uobj;
 	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
+	rdma_rt_set_type(&pd->res, RDMA_RESTRACK_PD);
+
+	ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
+	if (ret)
+		goto err_alloc;
 
 	uobj->object = pd;
 	memset(&resp, 0, sizeof resp);
 	resp.pd_handle = uobj->id;
-	rdma_rt_set_type(&pd->res, RDMA_RESTRACK_PD);
 	rdma_restrack_uadd(&pd->res);
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
@@ -432,7 +436,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 
 err_copy:
 	ib_dealloc_pd(pd);
+	pd = NULL;
+err_alloc:
+	kfree(pd);
 err:
 	uobj_alloc_abort(uobj);
 	return ret;

diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index cbc72312eb41..f224cb727224 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -188,7 +188,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
 
-	ib_dealloc_pd((struct ib_pd *)uobject->object);
+	ib_dealloc_pd(pd);
 	return 0;
 }
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 814cbc814cf4..2ade82c99e70 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -254,10 +254,11 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 {
 	struct ib_pd *pd;
 	int mr_access_flags = 0;
+	int ret;
 
-	pd = device->ops.alloc_pd(device, NULL, NULL);
-	if (IS_ERR(pd))
-		return pd;
+	pd = rdma_zalloc_drv_obj(device, ib_pd);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
 
 	pd->device = device;
 	pd->uobject = NULL;
@@ -265,6 +266,16 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 	atomic_set(&pd->usecnt, 0);
 	pd->flags = flags;
 
+	rdma_rt_set_type(&pd->res, RDMA_RESTRACK_PD);
+	rdma_restrack_set_task(&pd->res, caller);
+
+	ret = device->ops.alloc_pd(pd, NULL, NULL);
+	if (ret) {
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+	rdma_restrack_kadd(&pd->res);
+
 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
 		pd->local_dma_lkey = device->local_dma_lkey;
 	else
@@ -275,10 +286,6 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 			mr_access_flags |= IB_ACCESS_REMOTE_READ |
 					   IB_ACCESS_REMOTE_WRITE;
 	}
 
-	rdma_rt_set_type(&pd->res, RDMA_RESTRACK_PD);
-	rdma_restrack_set_task(&pd->res, caller);
-	rdma_restrack_kadd(&pd->res);
-
 	if (mr_access_flags) {
 		struct ib_mr *mr;
 
@@ -329,10 +336,8 @@ void ib_dealloc_pd(struct ib_pd *pd)
 	WARN_ON(atomic_read(&pd->usecnt));
 
 	rdma_restrack_del(&pd->res);
-	/* Making delalloc_pd a void return is a WIP, no driver should return an error here.
*/ - ret = pd->device->ops.dealloc_pd(pd); - WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); + pd->device->ops.dealloc_pd(pd); + kfree(pd); } EXPORT_SYMBOL(ib_dealloc_pd); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 9bc637e49faa..e71fee5f42f4 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -563,41 +563,29 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) } /* Protection Domains */ -int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) +void bnxt_re_dealloc_pd(struct ib_pd *ib_pd) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; - int rc; bnxt_re_destroy_fence_mr(pd); - if (pd->qplib_pd.id) { - rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, - &rdev->qplib_res.pd_tbl, - &pd->qplib_pd); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD"); - } - - kfree(pd); - return 0; + if (pd->qplib_pd.id) + bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); } -struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *ucontext, - struct ib_udata *udata) +int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext, + struct ib_udata *udata) { + struct ib_device *ibdev = ibpd->device; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_re_ucontext *ucntx = container_of(ucontext, struct bnxt_re_ucontext, ib_uctx); - struct bnxt_re_pd *pd; + struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); int rc; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - pd->rdev = rdev; if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) { dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD"); @@ -637,13 +625,12 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, if (bnxt_re_create_fence_mr(pd)) dev_warn(rdev_to_dev(rdev), "Failed to create Fence-MR\n"); - return &pd->ib_pd; + return 0; dbfail: - (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, - &pd->qplib_pd); + bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); fail: - kfree(pd); - return ERR_PTR(rc); + return rc; } /* Address Handles */ diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index c4af72604b4f..c7cca803cfa3 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -56,8 +56,8 @@ struct bnxt_re_fence_data { }; struct bnxt_re_pd { + struct ib_pd ib_pd; struct bnxt_re_dev *rdev; - struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; struct bnxt_re_fence_data fence; }; @@ -163,10 +163,9 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, int index, union ib_gid *gid); enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, u8 port_num); -struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); -int bnxt_re_dealloc_pd(struct ib_pd *pd); +int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); +void bnxt_re_dealloc_pd(struct ib_pd *pd); struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 16eecfa5882c..643b88642f21 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -612,6 +612,7 @@ static const 
struct ib_device_ops bnxt_re_dev_ops = { .query_srq = bnxt_re_query_srq, .reg_user_mr = bnxt_re_reg_user_mr, .req_notify_cq = bnxt_re_req_notify_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd), }; static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 07c20cd07f33..49e143b94011 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -370,7 +370,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) return ret; } -static int iwch_deallocate_pd(struct ib_pd *pd) +static void iwch_deallocate_pd(struct ib_pd *pd) { struct iwch_dev *rhp; struct iwch_pd *php; @@ -379,15 +379,13 @@ static int iwch_deallocate_pd(struct ib_pd *pd) rhp = php->rhp; pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); - kfree(php); - return 0; } -static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct iwch_pd *php; + struct iwch_pd *php = to_iwch_pd(pd); + struct ib_device *ibdev = pd->device; u32 pdid; struct iwch_dev *rhp; @@ -395,12 +393,8 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, rhp = (struct iwch_dev *) ibdev; pdid = cxio_hal_get_pdid(rhp->rdev.rscp); if (!pdid) - return ERR_PTR(-EINVAL); - php = kzalloc(sizeof(*php), GFP_KERNEL); - if (!php) { - cxio_hal_put_pdid(rhp->rdev.rscp, pdid); - return ERR_PTR(-ENOMEM); - } + return -EINVAL; + php->pdid = pdid; php->rhp = rhp; if (context) { @@ -408,11 +402,11 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { iwch_deallocate_pd(&php->ibpd); - return ERR_PTR(-EFAULT); + return -EFAULT; } } pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); - return &php->ibpd; + return 0; } static int iwch_dereg_mr(struct ib_mr *ib_mr) @@ -1350,6 +1344,7 @@ static const struct ib_device_ops iwch_dev_ops = { .reg_user_mr = iwch_reg_user_mr, .req_notify_cq = iwch_arm_cq, .resize_cq = iwch_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd), }; int iwch_register_device(struct iwch_dev *dev) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f35c767a9922..a3b61f54c2eb 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -209,7 +209,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) return ret; } -static int c4iw_deallocate_pd(struct ib_pd *pd) +static void c4iw_deallocate_pd(struct ib_pd *pd) { struct c4iw_dev *rhp; struct c4iw_pd *php; @@ -221,15 +221,13 @@ static int c4iw_deallocate_pd(struct ib_pd *pd) mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur--; mutex_unlock(&rhp->rdev.stats.lock); - kfree(php); - return 0; } -static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct c4iw_pd *php; + struct c4iw_pd *php = to_c4iw_pd(pd); + struct ib_device *ibdev = pd->device; u32 pdid; struct c4iw_dev *rhp; @@ -237,12 +235,8 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, rhp = (struct c4iw_dev *) ibdev; pdid = 
c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
-		return ERR_PTR(-EINVAL);
-	php = kzalloc(sizeof(*php), GFP_KERNEL);
-	if (!php) {
-		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
-		return ERR_PTR(-ENOMEM);
-	}
+		return -EINVAL;
+
 	php->pdid = pdid;
 	php->rhp = rhp;
 	if (context) {
@@ -250,7 +244,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
 			c4iw_deallocate_pd(&php->ibpd);
-			return ERR_PTR(-EFAULT);
+			return -EFAULT;
 		}
 	}
 	mutex_lock(&rhp->rdev.stats.lock);
@@ -259,7 +253,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
 	mutex_unlock(&rhp->rdev.stats.lock);
 	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
-	return &php->ibpd;
+	return 0;
 }
 
 static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -570,6 +564,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.reg_user_mr = c4iw_reg_user_mr,
 	.req_notify_cq = c4iw_arm_cq,
 	.fill_res_entry = fill_res_entry,
+	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
 };
 
 void c4iw_register_device(struct work_struct *work)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 6fde434b22fd..e159435631d6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1085,10 +1085,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
 int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
 
-struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
-				struct ib_ucontext *context,
-				struct ib_udata *udata);
-int hns_roce_dealloc_pd(struct ib_pd *pd);
+int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+		      struct ib_udata *udata);
+void hns_roce_dealloc_pd(struct ib_pd *pd);
 
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index b74c742b000c..46264b7de9e4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -711,13 +711,14 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	struct ib_qp_attr attr = { 0 };
 	struct hns_roce_v1_priv *priv;
 	struct hns_roce_qp *hr_qp;
+	struct ib_device *ibdev;
 	struct ib_cq *cq;
 	struct ib_pd *pd;
 	union ib_gid dgid;
 	u64 subnet_prefix;
 	int attr_mask = 0;
+	int ret = -ENOMEM;
 	int i, j;
-	int ret;
 	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 	u8 phy_port;
 	u8 port = 0;
@@ -742,12 +743,16 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
 	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
 
-	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
-	if (IS_ERR(pd)) {
-		dev_err(dev, "Create pd for reserved loop qp failed!");
-		ret = -ENOMEM;
+	ibdev = &hr_dev->ib_dev;
+	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+	if (!pd)
+		goto alloc_mem_failed;
+
+	pd->device = ibdev;
+	ret = hns_roce_alloc_pd(pd, NULL, NULL);
+	if (ret)
 		goto alloc_pd_failed;
-	}
+
 	free_mr->mr_free_pd = to_hr_pd(pd);
 	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
 	free_mr->mr_free_pd->ibpd.uobject = NULL;
@@ -854,10 +859,12 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
 	}
 
-	if (hns_roce_dealloc_pd(pd))
-		dev_err(dev, "Destroy pd for
create_lp_qp failed!\n"); + hns_roce_dealloc_pd(pd); alloc_pd_failed: + kfree(pd); + +alloc_mem_failed: if (hns_roce_ib_destroy_cq(cq)) dev_err(dev, "Destroy cq for create_lp_qp failed!\n"); @@ -891,9 +898,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) if (ret) dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); - ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd); - if (ret) - dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret); + hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd); } static int hns_roce_db_init(struct hns_roce_dev *hr_dev) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 67a8c4333f4f..ccf10622586c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -472,6 +472,7 @@ static const struct ib_device_ops hns_roce_dev_ops = { .query_pkey = hns_roce_query_pkey, .query_port = hns_roce_query_port, .reg_user_mr = hns_roce_reg_user_mr, + INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd), }; static const struct ib_device_ops hns_roce_dev_mr_ops = { diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 4a29b2cb9bab..b9b97c5e97e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -57,24 +57,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); } -struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, - struct ib_ucontext *context, - struct ib_udata *udata) +int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { + struct ib_device *ib_dev = ibpd->device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct device *dev = hr_dev->dev; - struct hns_roce_pd *pd; + struct hns_roce_pd *pd = to_hr_pd(ibpd); int ret; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); if (ret) { - kfree(pd); dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n"); - return ERR_PTR(ret); + return ret; } if (context) { @@ -83,21 +78,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n"); - kfree(pd); - return ERR_PTR(-EFAULT); + return -EFAULT; } } - return &pd->ibpd; + return 0; } EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); -int hns_roce_dealloc_pd(struct ib_pd *pd) +void hns_roce_dealloc_pd(struct ib_pd *pd) { hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); - kfree(to_hr_pd(pd)); - - return 0; } EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 59e978141ad4..c5a881172524 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -601,7 +601,6 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev) if (!atomic_dec_and_test(&iwpd->usecount)) return; i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id); - kfree(iwpd); } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 12b31a8440be..bc1d1d061261 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -312,16 +312,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct 
i40iw_sc_ /** * i40iw_alloc_pd - allocate protection domain - * @ibdev: device pointer from stack + * @pd: PD pointer * @context: user context created during alloc * @udata: user data */ -static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct i40iw_pd *iwpd; - struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_alloc_pd_resp uresp; struct i40iw_sc_pd *sc_pd; @@ -330,19 +329,13 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, int err; if (iwdev->closing) - return ERR_PTR(-ENODEV); + return -ENODEV; err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds, iwdev->max_pd, &pd_id, &iwdev->next_pd); if (err) { i40iw_pr_err("alloc resource failed\n"); - return ERR_PTR(err); - } - - iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); - if (!iwpd) { - err = -ENOMEM; - goto free_res; + return err; } sc_pd = &iwpd->sc_pd; @@ -361,25 +354,23 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, } i40iw_add_pdusecount(iwpd); - return &iwpd->ibpd; + return 0; + error: - kfree(iwpd); -free_res: i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id); - return ERR_PTR(err); + return err; } /** * i40iw_dealloc_pd - deallocate pd * @ibpd: ptr of pd to be deallocated */ -static int i40iw_dealloc_pd(struct ib_pd *ibpd) +static void i40iw_dealloc_pd(struct ib_pd *ibpd) { struct i40iw_pd *iwpd = to_iwpd(ibpd); struct i40iw_device *iwdev = to_iwdev(ibpd->device); i40iw_rem_pdusecount(iwpd, iwdev); - return 0; } /** @@ -2750,6 +2741,7 @@ static const struct ib_device_ops i40iw_dev_ops = { .query_qp = i40iw_query_qp, .reg_user_mr = i40iw_reg_user_mr, .req_notify_cq = i40iw_req_notify_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd), }; /** diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index dc2ffd293a11..3f09ad6483d8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1186,38 +1186,27 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) } } -static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct mlx4_ib_pd *pd; + struct mlx4_ib_pd *pd = to_mpd(ibpd); + struct ib_device *ibdev = ibpd->device; int err; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); - if (err) { - kfree(pd); - return ERR_PTR(err); - } + if (err) + return err; - if (context) - if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) { - mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); - kfree(pd); - return ERR_PTR(-EFAULT); - } - return &pd->ibpd; + if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); + return -EFAULT; + } + return 0; } -static int mlx4_ib_dealloc_pd(struct ib_pd *pd) +static void mlx4_ib_dealloc_pd(struct ib_pd *pd) { mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); - kfree(pd); - - return 0; } static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, @@ -2580,6 +2569,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = { .req_notify_cq = mlx4_ib_arm_cq, .rereg_user_mr = 
mlx4_ib_rereg_user_mr, .resize_cq = mlx4_ib_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), }; static const struct ib_device_ops mlx4_ib_dev_wq_ops = { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2aaa196c2653..9b99c33b8135 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2252,30 +2252,24 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm) return 0; } -static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { + struct mlx5_ib_pd *pd = to_mpd(ibpd); + struct ib_device *ibdev = ibpd->device; struct mlx5_ib_alloc_pd_resp resp; - struct mlx5_ib_pd *pd; int err; u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; u16 uid = 0; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - uid = context ? to_mucontext(context)->devx_uid : 0; MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); MLX5_SET(alloc_pd_in, in, uid, uid); err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), out, sizeof(out)); - if (err) { - kfree(pd); - return ERR_PTR(err); - } + if (err) + return err; pd->pdn = MLX5_GET(alloc_pd_out, out, pd); pd->uid = uid; @@ -2283,23 +2277,19 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); - kfree(pd); - return ERR_PTR(-EFAULT); + return -EFAULT; } } - return &pd->ibpd; + return 0; } -static int mlx5_ib_dealloc_pd(struct ib_pd *pd) +static void mlx5_ib_dealloc_pd(struct ib_pd *pd) { struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); - kfree(mpd); - - return 0; } enum { @@ -4652,23 +4642,28 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) { struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; + struct ib_device *ibdev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; int port; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); + ibdev = &dev->ib_dev; mutex_init(&devr->mutex); - devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); - if (IS_ERR(devr->p0)) { - ret = PTR_ERR(devr->p0); - goto error0; - } - devr->p0->device = &dev->ib_dev; + devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); + if (!devr->p0) + return -ENOMEM; + + devr->p0->device = ibdev; devr->p0->uobject = NULL; atomic_set(&devr->p0->usecnt, 0); + ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL); + if (ret) + goto error0; + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); if (IS_ERR(devr->c0)) { ret = PTR_ERR(devr->c0); @@ -4766,6 +4761,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) error1: mlx5_ib_dealloc_pd(devr->p0); error0: + kfree(devr->p0); return ret; } @@ -4781,6 +4777,7 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) mlx5_ib_dealloc_xrcd(devr->x1); mlx5_ib_destroy_cq(devr->c0); mlx5_ib_dealloc_pd(devr->p0); + kfree(devr->p0); /* Make sure no change P_Key work items are still executing */ for (port = 0; port < dev->num_ports; ++port) @@ -5910,6 +5907,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .req_notify_cq = mlx5_ib_arm_cq, .rereg_user_mr = mlx5_ib_rereg_user_mr, .resize_cq = mlx5_ib_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), }; static const struct 
ib_device_ops mlx5_ib_dev_flow_ipsec_ops = { diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 1bb67562c8c8..2c754bc226f3 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -374,40 +374,30 @@ static int mthca_mmap_uar(struct ib_ucontext *context, return 0; } -static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct mthca_pd *pd; + struct ib_device *ibdev = ibpd->device; + struct mthca_pd *pd = to_mpd(ibpd); int err; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); - if (err) { - kfree(pd); - return ERR_PTR(err); - } + if (err) + return err; if (context) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); - kfree(pd); - return ERR_PTR(-EFAULT); + return -EFAULT; } } - return &pd->ibpd; + return 0; } -static int mthca_dealloc_pd(struct ib_pd *pd) +static void mthca_dealloc_pd(struct ib_pd *pd) { mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); - kfree(pd); - - return 0; } static struct ib_ah *mthca_ah_create(struct ib_pd *pd, @@ -1228,6 +1218,7 @@ static const struct ib_device_ops mthca_dev_ops = { .query_qp = mthca_query_qp, .reg_user_mr = mthca_reg_user_mr, .resize_cq = mthca_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd), }; static const struct ib_device_ops mthca_dev_arbel_srq_ops = { diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 034156f7e9ed..db0e9d55fef9 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -658,10 +658,11 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) /** * nes_alloc_pd */ -static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, struct ib_udata *udata) +static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct nes_pd *nespd; + struct ib_device *ibdev = pd->device; + struct nes_pd *nespd = to_nespd(pd); struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; @@ -676,15 +677,8 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD); - if (err) { - return ERR_PTR(err); - } - - nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL); - if (!nespd) { - nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); - return ERR_PTR(-ENOMEM); - } + if (err) + return err; nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n", nespd, dev_name(&nesvnic->nesibdev->ibdev.dev)); @@ -700,16 +694,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); - kfree(nespd); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } uresp.pd_id = nespd->pd_id; uresp.mmap_db_index = nespd->mmap_db_index; if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) { nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); - kfree(nespd); - return 
ERR_PTR(-EFAULT); + return -EFAULT; } set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells); @@ -718,14 +710,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, } nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd); - return &nespd->ibpd; + return 0; } /** * nes_dealloc_pd */ -static int nes_dealloc_pd(struct ib_pd *ibpd) +static void nes_dealloc_pd(struct ib_pd *ibpd) { struct nes_ucontext *nesucontext; struct nes_pd *nespd = to_nespd(ibpd); @@ -748,9 +740,6 @@ static int nes_dealloc_pd(struct ib_pd *ibpd) nespd->pd_id, nespd); nes_free_resource(nesadapter, nesadapter->allocated_pds, (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12)); - kfree(nespd); - - return 0; } @@ -3658,6 +3647,7 @@ static const struct ib_device_ops nes_dev_ops = { .query_qp = nes_query_qp, .reg_user_mr = nes_reg_user_mr, .req_notify_cq = nes_req_notify_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, nes_pd, ibpd), }; /** diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index b0491b9ecfe4..8284d8e0ca8b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -179,6 +179,7 @@ static const struct ib_device_ops ocrdma_dev_ops = { .reg_user_mr = ocrdma_reg_user_mr, .req_notify_cq = ocrdma_arm_cq, .resize_cq = ocrdma_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd), }; static const struct ib_device_ops ocrdma_dev_srq_ops = { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 2a62936bef4d..980ba97188ff 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -367,17 +367,12 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) return status; } -static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, - struct ocrdma_ucontext *uctx, - struct ib_udata *udata) +static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd, + struct ocrdma_ucontext *uctx, + struct ib_udata *udata) { - struct ocrdma_pd *pd = NULL; int status; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - if (udata && uctx && dev->attr.max_dpp_pds) { pd->dpp_enabled = ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; @@ -386,15 +381,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, dev->attr.wqe_size) : 0; } - if (dev->pd_mgr->pd_prealloc_valid) { - status = ocrdma_get_pd_num(dev, pd); - if (status == 0) { - return pd; - } else { - kfree(pd); - return ERR_PTR(status); - } - } + if (dev->pd_mgr->pd_prealloc_valid) + return ocrdma_get_pd_num(dev, pd); retry: status = ocrdma_mbx_alloc_pd(dev, pd); @@ -403,13 +391,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, pd->dpp_enabled = false; pd->num_dpp_qp = 0; goto retry; - } else { - kfree(pd); - return ERR_PTR(status); } + return status; } - return pd; + return 0; } static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, @@ -418,30 +404,33 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, return (uctx->cntxt_pd == pd); } -static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, +static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) { - int status; - if (dev->pd_mgr->pd_prealloc_valid) - status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); + ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); else - status = ocrdma_mbx_dealloc_pd(dev, pd); - - kfree(pd); - return status; + ocrdma_mbx_dealloc_pd(dev, 
pd); } static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, struct ocrdma_ucontext *uctx, struct ib_udata *udata) { - int status = 0; + struct ib_device *ibdev = &dev->ibdev; + struct ib_pd *pd; + int status; + + pd = rdma_zalloc_drv_obj(ibdev, ib_pd); + if (!pd) + return -ENOMEM; + + pd->device = ibdev; + uctx->cntxt_pd = get_ocrdma_pd(pd); - uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata); - if (IS_ERR(uctx->cntxt_pd)) { - status = PTR_ERR(uctx->cntxt_pd); - uctx->cntxt_pd = NULL; + status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata); + if (status) { + kfree(uctx->cntxt_pd); goto err; } @@ -460,6 +449,7 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) pr_err("%s(%d) Freeing in use pdid=0x%x.\n", __func__, dev->id, pd->id); } + kfree(uctx->cntxt_pd); uctx->cntxt_pd = NULL; (void)_ocrdma_dealloc_pd(dev, pd); return 0; @@ -537,6 +527,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, return &ctx->ibucontext; cpy_err: + ocrdma_dealloc_ucontext_pd(ctx); pd_err: ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); map_err: @@ -658,10 +649,10 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, return status; } -struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { + struct ib_device *ibdev = ibpd->device; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); struct ocrdma_pd *pd; struct ocrdma_ucontext *uctx = NULL; @@ -677,11 +668,10 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, } } - pd = _ocrdma_alloc_pd(dev, uctx, udata); - if (IS_ERR(pd)) { - status = PTR_ERR(pd); + pd = get_ocrdma_pd(ibpd); + status = _ocrdma_alloc_pd(dev, pd, uctx, udata); + if (status) goto exit; - } pd_mapping: if (udata && context) { @@ -689,25 +679,22 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, if (status) goto err; } - return &pd->ibpd; + return 0; err: - if (is_uctx_pd) { + if (is_uctx_pd) ocrdma_release_ucontext_pd(uctx); - } else { - if (_ocrdma_dealloc_pd(dev, pd)) - pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__); - } + else + _ocrdma_dealloc_pd(dev, pd); exit: - return ERR_PTR(status); + return status; } -int ocrdma_dealloc_pd(struct ib_pd *ibpd) +void ocrdma_dealloc_pd(struct ib_pd *ibpd) { struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); struct ocrdma_ucontext *uctx = NULL; - int status = 0; u64 usr_db; uctx = pd->uctx; @@ -721,11 +708,10 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd) if (is_ucontext_pd(uctx, pd)) { ocrdma_release_ucontext_pd(uctx); - return status; + return; } } - status = _ocrdma_dealloc_pd(dev, pd); - return status; + _ocrdma_dealloc_pd(dev, pd); } static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b69cfdce7970..1fd66721c930 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -70,9 +70,9 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *); int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -struct ib_pd *ocrdma_alloc_pd(struct ib_device *, - struct ib_ucontext *, struct ib_udata *); -int ocrdma_dealloc_pd(struct ib_pd *pd); +int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, + struct ib_udata *udata); +void ocrdma_dealloc_pd(struct ib_pd *pd); struct 
ib_cq *ocrdma_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index f85e72b65a10..bb916a2d6dbb 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -239,6 +239,7 @@ static const struct ib_device_ops qedr_dev_ops = { .reg_user_mr = qedr_reg_user_mr, .req_notify_cq = qedr_arm_cq, .resize_cq = qedr_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), }; static int qedr_register_device(struct qedr_dev *dev) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 989f08633fbe..a06d2258394a 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -450,11 +450,12 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot); } -struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, struct ib_udata *udata) +int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { + struct ib_device *ibdev = ibpd->device; struct qedr_dev *dev = get_qedr_dev(ibdev); - struct qedr_pd *pd; + struct qedr_pd *pd = get_qedr_pd(ibpd); u16 pd_id; int rc; @@ -463,16 +464,12 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, if (!dev->rdma_ctx) { DP_ERR(dev, "invalid RDMA context\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); if (rc) - goto err; + return rc; pd->pd_id = pd_id; @@ -485,36 +482,23 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); - goto err; + return rc; } pd->uctx = get_qedr_ucontext(context); pd->uctx->pd = pd; } - return &pd->ibpd; - -err: - kfree(pd); - return ERR_PTR(rc); + return 0; } -int qedr_dealloc_pd(struct ib_pd *ibpd) +void qedr_dealloc_pd(struct ib_pd *ibpd) { struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_pd *pd = get_qedr_pd(ibpd); - if (!pd) { - pr_err("Invalid PD received in dealloc_pd\n"); - return -EINVAL; - } - DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); - - kfree(pd); - - return 0; } static void qedr_free_pbl(struct qedr_dev *dev, diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 1852b7012bf4..97a6ff3f9afb 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -47,9 +47,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *); int qedr_dealloc_ucontext(struct ib_ucontext *); int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); -struct ib_pd *qedr_alloc_pd(struct ib_device *, - struct ib_ucontext *, struct ib_udata *); -int qedr_dealloc_pd(struct ib_pd *pd); +int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx, + struct ib_udata *udata); +void qedr_dealloc_pd(struct ib_pd *pd); struct ib_cq *qedr_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 3201dd1899c7..07e875e132e3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -359,6 +359,7 @@ static const struct ib_device_ops usnic_dev_ops = { .query_qp = usnic_ib_query_qp, .reg_user_mr 
= usnic_ib_reg_mr, .req_notify_cq = usnic_ib_req_notify_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd), }; /* Start of PF discovery section */ diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 432e6f6599fa..2c101da14ab2 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -456,37 +456,23 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, return 0; } -struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct usnic_ib_pd *pd; + struct usnic_ib_pd *pd = to_upd(ibpd); void *umem_pd; - usnic_dbg("\n"); - - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return ERR_PTR(-ENOMEM); - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); if (IS_ERR_OR_NULL(umem_pd)) { - kfree(pd); - return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM); + return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; } - usnic_info("domain 0x%p allocated for context 0x%p and device %s\n", - pd, context, dev_name(&ibdev->dev)); - return &pd->ibpd; + return 0; } -int usnic_ib_dealloc_pd(struct ib_pd *pd) +void usnic_ib_dealloc_pd(struct ib_pd *pd) { - usnic_info("freeing domain 0x%p\n", pd); - usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); - kfree(pd); - return 0; } struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index e33144261b9a..8c4c8c7befee 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -51,10 +51,9 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num); int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey); -struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); -int usnic_ib_dealloc_pd(struct ib_pd *pd); +int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata); +void usnic_ib_dealloc_pd(struct ib_pd *pd); struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index b5864750fb6e..18334e6932af 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -195,6 +195,7 @@ static const struct ib_device_ops pvrdma_dev_ops = { .query_qp = pvrdma_query_qp, .reg_user_mr = pvrdma_reg_user_mr, .req_notify_cq = pvrdma_req_notify_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd), }; static const struct ib_device_ops pvrdma_dev_srq_ops = { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index e10149248ce2..192a043ab959 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -433,37 +433,29 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) /** * pvrdma_alloc_pd - allocate protection domain - * @ibdev: the IB device + * @ibpd: PD pointer * @context: user context * @udata: user data * * @return: the ib_pd protection domain pointer on success, otherwise errno. 
*/ -struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct pvrdma_pd *pd; + struct ib_device *ibdev = ibpd->device; + struct pvrdma_pd *pd = to_vpd(ibpd); struct pvrdma_dev *dev = to_vdev(ibdev); - union pvrdma_cmd_req req; - union pvrdma_cmd_resp rsp; + union pvrdma_cmd_req req = {}; + union pvrdma_cmd_resp rsp = {}; struct pvrdma_cmd_create_pd *cmd = &req.create_pd; struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; struct pvrdma_alloc_pd_resp pd_resp = {0}; int ret; - void *ptr; /* Check allowed max pds */ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd)) - return ERR_PTR(-ENOMEM); + return -ENOMEM; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ptr = ERR_PTR(-ENOMEM); - goto err; - } - - memset(cmd, 0, sizeof(*cmd)); cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD; cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0; ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP); @@ -471,8 +463,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, dev_warn(&dev->pdev->dev, "failed to allocate protection domain, error: %d\n", ret); - ptr = ERR_PTR(ret); - goto freepd; + goto err; } pd->privileged = !context; @@ -485,18 +476,16 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, dev_warn(&dev->pdev->dev, "failed to copy back protection domain\n"); pvrdma_dealloc_pd(&pd->ibpd); - return ERR_PTR(-EFAULT); + return -EFAULT; } } /* u32 pd handle */ - return &pd->ibpd; + return 0; -freepd: - kfree(pd); err: atomic_dec(&dev->num_pds); - return ptr; + return ret; } /** @@ -505,14 +494,13 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, * * @return: 0 on success, otherwise errno. 
*/ -int pvrdma_dealloc_pd(struct ib_pd *pd) +void pvrdma_dealloc_pd(struct ib_pd *pd) { struct pvrdma_dev *dev = to_vdev(pd->device); - union pvrdma_cmd_req req; + union pvrdma_cmd_req req = {}; struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd; int ret; - memset(cmd, 0, sizeof(*cmd)); cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD; cmd->pd_handle = to_vpd(pd)->pd_handle; @@ -522,10 +510,7 @@ int pvrdma_dealloc_pd(struct ib_pd *pd) "could not dealloc protection domain, error: %d\n", ret); - kfree(to_vpd(pd)); atomic_dec(&dev->num_pds); - - return 0; } /** diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index f7f758d60110..ed91baad1ffa 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -399,10 +399,9 @@ int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata); int pvrdma_dealloc_ucontext(struct ib_ucontext *context); -struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); -int pvrdma_dealloc_pd(struct ib_pd *ibpd); +int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); +void pvrdma_dealloc_pd(struct ib_pd *ibpd); struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c index dcc1870b8d23..6033054b22fa 100644 --- a/drivers/infiniband/sw/rdmavt/pd.c +++ b/drivers/infiniband/sw/rdmavt/pd.c @@ -50,7 +50,7 @@ /** * rvt_alloc_pd - allocate a protection domain - * @ibdev: ib device + * @ibpd: PD * @context: optional user context * @udata: optional user data * @@ -58,19 +58,14 @@ * * Return: 0 on success */ -struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) +int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { + struct ib_device *ibdev = ibpd->device; struct rvt_dev_info *dev = ib_to_rvt(ibdev); - struct rvt_pd *pd; - struct ib_pd *ret; + struct rvt_pd *pd = ibpd_to_rvtpd(ibpd); + int ret = 0; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } /* * While we could continue allocating protecetion domains, being * constrained only by system resources. The IBTA spec defines that @@ -81,8 +76,7 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, spin_lock(&dev->n_pds_lock); if (dev->n_pds_allocated == dev->dparms.props.max_pd) { spin_unlock(&dev->n_pds_lock); - kfree(pd); - ret = ERR_PTR(-ENOMEM); + ret = -ENOMEM; goto bail; } @@ -92,8 +86,6 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, /* ib_alloc_pd() will initialize pd->ibpd. 
*/ pd->user = !!udata; - ret = &pd->ibpd; - bail: return ret; } @@ -104,16 +96,11 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, * * Return: always 0 */ -int rvt_dealloc_pd(struct ib_pd *ibpd) +void rvt_dealloc_pd(struct ib_pd *ibpd) { - struct rvt_pd *pd = ibpd_to_rvtpd(ibpd); struct rvt_dev_info *dev = ib_to_rvt(ibpd->device); spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; spin_unlock(&dev->n_pds_lock); - - kfree(pd); - - return 0; } diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h index 1892ca4a9746..7a887e4a45e7 100644 --- a/drivers/infiniband/sw/rdmavt/pd.h +++ b/drivers/infiniband/sw/rdmavt/pd.h @@ -50,9 +50,8 @@ #include <rdma/rdma_vt.h> -struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata); -int rvt_dealloc_pd(struct ib_pd *ibpd); +int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); +void rvt_dealloc_pd(struct ib_pd *ibpd); #endif /* DEF_RDMAVTPD_H */ diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 7de7389d0235..4c32aa852fc2 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -436,6 +436,7 @@ static const struct ib_device_ops rvt_dev_ops = { .req_notify_cq = rvt_req_notify_cq, .resize_cq = rvt_resize_cq, .unmap_fmr = rvt_unmap_fmr, + INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd), }; static noinline int check_support(struct rvt_dev_info *rdi, int verb) diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 383e65c7bbc0..ad11605230a7 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -53,7 +53,6 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe) static void rxe_cleanup(struct rxe_dev *rxe) { rxe_pool_cleanup(&rxe->uc_pool); - rxe_pool_cleanup(&rxe->pd_pool); rxe_pool_cleanup(&rxe->ah_pool); rxe_pool_cleanup(&rxe->srq_pool); rxe_pool_cleanup(&rxe->qp_pool); @@ -190,11 +189,6 @@ static int rxe_init_pools(struct rxe_dev *rxe) if (err) goto err1; - err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD, - rxe->attr.max_pd); - if (err) - goto err2; - err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH, rxe->attr.max_ah); if (err) @@ -252,8 +246,6 @@ static int rxe_init_pools(struct rxe_dev *rxe) err4: rxe_pool_cleanup(&rxe->ah_pool); err3: - rxe_pool_cleanup(&rxe->pd_pool); -err2: rxe_pool_cleanup(&rxe->uc_pool); err1: return err; diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index b5c91df22047..b91ad30ecb2c 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -43,10 +43,6 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = { .name = "rxe-uc", .size = sizeof(struct rxe_ucontext), }, - [RXE_TYPE_PD] = { - .name = "rxe-pd", - .size = sizeof(struct rxe_pd), - }, [RXE_TYPE_AH] = { .name = "rxe-ah", .size = sizeof(struct rxe_ah), diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h index 72968c29e01f..7ecdbdc1728b 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.h +++ b/drivers/infiniband/sw/rxe/rxe_pool.h @@ -45,7 +45,6 @@ enum rxe_pool_flags { enum rxe_elem_type { RXE_TYPE_UC, - RXE_TYPE_PD, RXE_TYPE_AH, RXE_TYPE_SRQ, RXE_TYPE_QP, diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index fd86fd2fbb26..7bd457b29493 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -345,7 +345,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, 
struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL; struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL; - rxe_add_ref(pd); rxe_add_ref(rcq); rxe_add_ref(scq); if (srq) @@ -378,7 +377,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, rxe_drop_ref(srq); rxe_drop_ref(scq); rxe_drop_ref(rcq); - rxe_drop_ref(pd); return err; } @@ -825,8 +823,6 @@ static void rxe_qp_do_cleanup(struct work_struct *work) rxe_drop_ref(qp->scq); if (qp->rcq) rxe_drop_ref(qp->rcq); - if (qp->pd) - rxe_drop_ref(qp->pd); if (qp->resp.mr) { rxe_drop_ref(qp->resp.mr); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 3d01247a28db..7c07b06a047e 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -191,23 +191,15 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num, return 0; } -static struct ib_pd *rxe_alloc_pd(struct ib_device *dev, - struct ib_ucontext *context, - struct ib_udata *udata) +static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context, + struct ib_udata *udata) { - struct rxe_dev *rxe = to_rdev(dev); - struct rxe_pd *pd; - - pd = rxe_alloc(&rxe->pd_pool); - return pd ? &pd->ibpd : ERR_PTR(-ENOMEM); + return 0; } -static int rxe_dealloc_pd(struct ib_pd *ibpd) +static void rxe_dealloc_pd(struct ib_pd *ibpd) { - struct rxe_pd *pd = to_rpd(ibpd); - - rxe_drop_ref(pd); - return 0; + return; } static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr, @@ -236,7 +228,6 @@ static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, if (!ah) return ERR_PTR(-ENOMEM); - rxe_add_ref(pd); ah->pd = pd; rxe_init_av(rxe, attr, &ah->av); @@ -271,7 +262,6 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) { struct rxe_ah *ah = to_rah(ibah); - rxe_drop_ref(ah->pd); rxe_drop_ref(ah); return 0; } @@ -351,7 +341,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, } rxe_add_index(srq); - rxe_add_ref(pd); srq->pd = pd; err = rxe_srq_from_init(rxe, srq, init, context, uresp); @@ -361,7 +350,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd, return &srq->ibsrq; err2: - rxe_drop_ref(pd); rxe_drop_index(srq); rxe_drop_ref(srq); err1: @@ -420,7 +408,6 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq) if (srq->rq.queue) rxe_queue_cleanup(srq->rq.queue); - rxe_drop_ref(srq->pd); rxe_drop_index(srq); rxe_drop_ref(srq); @@ -962,8 +949,6 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) rxe_add_index(mr); - rxe_add_ref(pd); - err = rxe_mem_init_dma(pd, access, mr); if (err) goto err2; @@ -971,7 +956,6 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) return &mr->ibmr; err2: - rxe_drop_ref(pd); rxe_drop_index(mr); rxe_drop_ref(mr); err1: @@ -997,8 +981,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, rxe_add_index(mr); - rxe_add_ref(pd); - err = rxe_mem_init_user(pd, start, length, iova, access, udata, mr); if (err) @@ -1007,7 +989,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, return &mr->ibmr; err3: - rxe_drop_ref(pd); rxe_drop_index(mr); rxe_drop_ref(mr); err2: @@ -1019,7 +1000,6 @@ static int rxe_dereg_mr(struct ib_mr *ibmr) struct rxe_mem *mr = to_rmr(ibmr); mr->state = RXE_MEM_STATE_ZOMBIE; - rxe_drop_ref(mr->pd); rxe_drop_index(mr); rxe_drop_ref(mr); return 0; @@ -1045,8 +1025,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, rxe_add_index(mr); - rxe_add_ref(pd); - err = rxe_mem_init_fast(pd, max_num_sg, 
mr); if (err) goto err2; @@ -1054,7 +1032,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, return &mr->ibmr; err2: - rxe_drop_ref(pd); rxe_drop_index(mr); rxe_drop_ref(mr); err1: @@ -1190,6 +1167,7 @@ static const struct ib_device_ops rxe_dev_ops = { .reg_user_mr = rxe_reg_user_mr, .req_notify_cq = rxe_req_notify_cq, .resize_cq = rxe_resize_cq, + INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd), }; int rxe_register_device(struct rxe_dev *rxe) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 74e04801d34d..ab9e3b4e1eb1 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -66,7 +66,6 @@ struct rxe_ucontext { }; struct rxe_pd { - struct rxe_pool_entry pelem; struct ib_pd ibpd; }; @@ -393,7 +392,6 @@ struct rxe_dev { int xmit_errors; struct rxe_pool uc_pool; - struct rxe_pool pd_pool; struct rxe_pool ah_pool; struct rxe_pool srq_pool; struct rxe_pool qp_pool; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 4423de3b3738..f5b99ad58763 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2385,10 +2385,9 @@ struct ib_device_ops { int (*dealloc_ucontext)(struct ib_ucontext *context); int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); - struct ib_pd *(*alloc_pd)(struct ib_device *device, - struct ib_ucontext *context, - struct ib_udata *udata); - int (*dealloc_pd)(struct ib_pd *pd); + int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); + void (*dealloc_pd)(struct ib_pd *pd); struct ib_ah *(*create_ah)(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, struct ib_udata *udata); @@ -2530,6 +2529,8 @@ struct ib_device_ops { */ int (*fill_res_entry)(struct sk_buff *msg, struct rdma_restrack_entry *entry); + + DECLARE_RDMA_OBJ_SIZE(ib_pd); }; struct rdma_restrack_root; -- 2.19.1
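For reviewers who want the shape of the conversion at a glance, below is a
minimal sketch of the driver-side pattern after this patch. It is an
illustration only: demo_pd, to_demo_pd(), demo_alloc_pd(), demo_dealloc_pd()
and demo_dev_ops are hypothetical names, and the fake PD-number handling
stands in for whatever HW resource a real driver manages. The callback
signatures and INIT_RDMA_OBJ_SIZE() usage follow this patch: the core sizes
and zero-allocates the PD via rdma_zalloc_drv_obj(), .alloc_pd() only
initializes it and returns 0 or -errno, and the core kfree()s it after the
void .dealloc_pd() returns.

#include <rdma/ib_verbs.h>

/* Hypothetical driver PD: the ib_pd is embedded so ib_core can size and
 * allocate the whole object before calling .alloc_pd().
 */
struct demo_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

static inline struct demo_pd *to_demo_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct demo_pd, ibpd);
}

/* .alloc_pd() only initializes the pre-zeroed object handed in by the
 * core and returns 0 or -errno; no kzalloc()/ERR_PTR() dance and no
 * kfree() on the error path.
 */
static int demo_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			 struct ib_udata *udata)
{
	struct demo_pd *pd = to_demo_pd(ibpd);

	pd->pdn = 1;	/* stands in for a HW PD-number allocation */

	if (context && udata &&
	    ib_copy_to_udata(udata, &pd->pdn, sizeof(pd->pdn)))
		return -EFAULT;	/* the core frees the object, not the driver */

	return 0;
}

/* .dealloc_pd() is void: it releases HW resources only; the core
 * kfree()s the ib_pd right after it returns.
 */
static void demo_dealloc_pd(struct ib_pd *ibpd)
{
	to_demo_pd(ibpd)->pdn = 0;	/* stands in for a HW PD-number free */
}

static const struct ib_device_ops demo_dev_ops = {
	.alloc_pd = demo_alloc_pd,
	.dealloc_pd = demo_dealloc_pd,
	/* tells ib_core how big the driver's PD really is */
	INIT_RDMA_OBJ_SIZE(ib_pd, demo_pd, ibpd),
};

The point to internalize is that freeing the PD memory is now exclusively
ib_core's job; a kfree() in either driver callback would be a double free.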