On 20/05/2019 9:54, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@xxxxxxxxxxxx>
>
> Ensure that CQ is allocated and freed by IB/core and not by drivers.
>
> Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
> ---
> diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
> index 8d8d3bd47c35..2ceb8067b99a 100644
> --- a/drivers/infiniband/hw/efa/efa.h
> +++ b/drivers/infiniband/hw/efa/efa.h
> @@ -137,9 +137,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
>                              struct ib_qp_init_attr *init_attr,
>                              struct ib_udata *udata);
>  void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
> -struct ib_cq *efa_create_cq(struct ib_device *ibdev,
> -                            const struct ib_cq_init_attr *attr,
> -                            struct ib_udata *udata);
> +int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> +                  struct ib_udata *udata);
>  struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
>                           u64 virt_addr, int access_flags,
>                           struct ib_udata *udata);
> diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
> index db974caf1eb1..27f8a473bde9 100644
> --- a/drivers/infiniband/hw/efa/efa_main.c
> +++ b/drivers/infiniband/hw/efa/efa_main.c
> @@ -220,6 +220,7 @@ static const struct ib_device_ops efa_dev_ops = {
>          .reg_user_mr = efa_reg_mr,
>
>          INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
> +        INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
>          INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
>          INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
>  };
> diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
> index e57f8adde174..6ccb85950439 100644
> --- a/drivers/infiniband/hw/efa/efa_verbs.c
> +++ b/drivers/infiniband/hw/efa/efa_verbs.c
> @@ -859,8 +859,6 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
>          efa_destroy_cq_idx(dev, cq->cq_idx);
>          dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
>                           DMA_FROM_DEVICE);
> -
> -        kfree(cq);
>  }
>
>  static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
> @@ -876,20 +874,23 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
>          return 0;
>  }
>
> -static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
> -                                  int vector, struct ib_ucontext *ibucontext,
> -                                  struct ib_udata *udata)
> +int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> +                  struct ib_udata *udata)
>  {
> +        struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
> +                udata, struct efa_ucontext, ibucontext);
> +        struct ib_device *ibdev = ibcq->device;
> +        struct efa_dev *dev = to_edev(ibdev);

Nit, can we please keep the existing reverse xmas tree?

>          struct efa_ibv_create_cq_resp resp = {};
>          struct efa_com_create_cq_params params;
>          struct efa_com_create_cq_result result;
> -        struct efa_dev *dev = to_edev(ibdev);
>          struct efa_ibv_create_cq cmd = {};
> +        struct efa_cq *cq = to_ecq(ibcq);
>          bool cq_entry_inserted = false;
> -        struct efa_cq *cq;
> +        int entries = attr->cqe;
>          int err;
>
> -        ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
> +        ibdev_dbg(ibcq->device, "create_cq entries %d\n", entries);

No need to change, we can keep using 'ibdev'. Same applies for other prints.
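
To illustrate both nits in one place, the declaration block could look
something like this (just a sketch, not compile-tested; these are the same
declarations as in the patch, only ordered longest-to-shortest and with
'ibdev' kept around for the prints):

        struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct efa_ucontext, ibucontext);
        struct efa_ibv_create_cq_resp resp = {};
        struct efa_com_create_cq_params params;
        struct efa_com_create_cq_result result;
        struct ib_device *ibdev = ibcq->device;
        struct efa_dev *dev = to_edev(ibdev);
        struct efa_ibv_create_cq cmd = {};
        struct efa_cq *cq = to_ecq(ibcq);
        bool cq_entry_inserted = false;
        int entries = attr->cqe;
        int err;

        /* 'ibdev' is still in scope, so the existing prints can stay as-is */
        ibdev_dbg(ibdev, "create_cq entries %d\n", entries);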
>
>          if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
>                  ibdev_dbg(ibdev,
> @@ -900,7 +901,7 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>          }
>
>          if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Incompatible ABI params, no input udata\n");
>                  err = -EINVAL;
>                  goto err_out;
> @@ -909,7 +910,7 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>          if (udata->inlen > sizeof(cmd) &&
>              !ib_is_udata_cleared(udata, sizeof(cmd),
>                                   udata->inlen - sizeof(cmd))) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Incompatible ABI params, unknown fields in udata\n");
>                  err = -EINVAL;
>                  goto err_out;
> @@ -918,45 +919,39 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>          err = ib_copy_from_udata(&cmd, udata,
>                                   min(sizeof(cmd), udata->inlen));
>          if (err) {
> -                ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
> +                ibdev_dbg(ibcq->device, "Cannot copy udata for create_cq\n");
>                  goto err_out;
>          }
>
>          if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Incompatible ABI params, unknown fields in udata\n");
>                  err = -EINVAL;
>                  goto err_out;
>          }
>
>          if (!cmd.cq_entry_size) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Invalid entry size [%u]\n", cmd.cq_entry_size);
>                  err = -EINVAL;
>                  goto err_out;
>          }
>
>          if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Invalid number of sub cqs[%u] expected[%u]\n",
>                            cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
>                  err = -EINVAL;
>                  goto err_out;
>          }
>
> -        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
> -        if (!cq) {
> -                err = -ENOMEM;
> -                goto err_out;
> -        }
> -
> -        cq->ucontext = to_eucontext(ibucontext);
> +        cq->ucontext = ucontext;
>          cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
>          cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
>                                           DMA_FROM_DEVICE);
>          if (!cq->cpu_addr) {
>                  err = -ENOMEM;
> -                goto err_free_cq;
> +                goto err_out;
>          }
>
>          params.uarn = cq->ucontext->uarn;
> @@ -975,7 +970,7 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>
>          err = cq_mmap_entries_setup(dev, cq, &resp);
>          if (err) {
> -                ibdev_dbg(ibdev,
> +                ibdev_dbg(ibcq->device,
>                            "Could not setup cq[%u] mmap entries\n", cq->cq_idx);
>                  goto err_destroy_cq;
>          }
> @@ -986,17 +981,17 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>                  err = ib_copy_to_udata(udata, &resp,
>                                         min(sizeof(resp), udata->outlen));
>                  if (err) {
> -                        ibdev_dbg(ibdev,
> +                        ibdev_dbg(ibcq->device,
>                                    "Failed to copy udata for create_cq\n");
>                          goto err_destroy_cq;
>                  }
>          }
>
> -        ibdev_dbg(ibdev,
> +        ibdev_dbg(ibcq->device,
>                    "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
>                    cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
>
> -        return &cq->ibcq;
> +        return 0;
>
>  err_destroy_cq:
>          efa_destroy_cq_idx(dev, cq->cq_idx);
> @@ -1005,23 +1000,9 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries,
>                           DMA_FROM_DEVICE);
>          if (!cq_entry_inserted)
>                  free_pages_exact(cq->cpu_addr, cq->size);
> -err_free_cq:
> -        kfree(cq);
>  err_out:
>          atomic64_inc(&dev->stats.sw_stats.create_cq_err);
> -        return ERR_PTR(err);
> -}
> -
> -struct ib_cq *efa_create_cq(struct ib_device *ibdev,
> -                            const struct ib_cq_init_attr *attr,
> -                            struct ib_udata *udata)
> -{
> -        struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
> -                                                                  struct efa_ucontext,
> -                                                                  ibucontext);
> -
> -        return do_create_cq(ibdev, attr->cqe, attr->comp_vector,
> -                            &ucontext->ibucontext, udata);
> +        return err;
>  }
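
Also, just to spell out why dropping the kzalloc()/kfree() pair is safe here:
with INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq) in efa_dev_ops, the core knows
how large struct efa_cq is (and, IIRC, the macro enforces that ibcq is its
first member), so the core allocates and frees the whole driver object and
only hands the embedded ib_cq to the callbacks. On the driver side that
leaves the usual container_of conversion, i.e. the to_ecq() helper we already
have in efa.h (quoting from memory, not from this patch):

        /* ib_cq is embedded in efa_cq, so this is plain container_of */
        static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
        {
                return container_of(ibcq, struct efa_cq, ibcq);
        }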