> From: linux-rdma-owner@xxxxxxxxxxxxxxx <linux-rdma-owner@xxxxxxxxxxxxxxx> On Behalf Of Gal Pressman
>
> On 20/08/2019 15:18, Michal Kalderon wrote:
> >  int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
> >  {
> > +	struct efa_ucontext *ucontext;
>
> Reverse xmas tree please.

ok

> >  	struct efa_dev *dev = to_edev(ibqp->pd->device);
> >  	struct efa_qp *qp = to_eqp(ibqp);
> >  	int err;
> >
> >  	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
> > +	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
> > +					     ibucontext);
>
> Consider initializing on ucontext declaration.

ok

> > +
> >  	err = efa_destroy_qp_handle(dev, qp->qp_handle);
> >  	if (err)
> >  		return err;
> >
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext, qp->sq_db_mmap_key);
> > +	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +				    qp->llq_desc_mmap_key);
>
> The mmap entries removal should happen before efa_destroy_qp_handle.

ok

> >  	if (qp->rq_cpu_addr) {
> >  		ibdev_dbg(&dev->ibdev,
> >  			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
> > @@ -503,6 +392,10 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
> >  			  &qp->rq_dma_addr);
> >  		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
> >  				 DMA_TO_DEVICE);
> > +		rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +					    qp->rq_mmap_key);
> > +		rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +					    qp->rq_db_mmap_key);
>
> Same.

ok

> >  	}
> >
> >  	kfree(qp);
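For the record, a sketch of how the destroy flow looks with the three comments above folded in (ucontext initialized at its declaration, all mmap entries removed before efa_destroy_qp_handle). This is essentially the quoted lines reordered; the ibdev_dbg() arguments hidden by the hunk boundary are reconstructed from the format string, and grouping the RQ removals in their own branch is my assumption:

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	/* initialized at declaration, longest line first (reverse xmas tree) */
	struct efa_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct efa_ucontext,
					  ibucontext);
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	/* drop the mmap entries before destroying the QP handle */
	rdma_user_mmap_entry_remove(&ucontext->ibucontext, qp->sq_db_mmap_key);
	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
				    qp->llq_desc_mmap_key);
	if (qp->rq_cpu_addr) {
		rdma_user_mmap_entry_remove(&ucontext->ibucontext,
					    qp->rq_mmap_key);
		rdma_user_mmap_entry_remove(&ucontext->ibucontext,
					    qp->rq_db_mmap_key);
	}

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
	}

	kfree(qp);
	return 0;
}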
> > @@ -515,46 +408,55 @@ static int qp_mmap_entries_setup(struct efa_qp *qp,
> >  				 struct efa_com_create_qp_params *params,
> >  				 struct efa_ibv_create_qp_resp *resp)
> >  {
> > +	u64 address;
> > +	u64 length;
> > +
> >  	/*
> >  	 * Once an entry is inserted it might be mmapped, hence cannot be
> >  	 * cleaned up until dealloc_ucontext.
> >  	 */
> >  	resp->sq_db_mmap_key =
>
> Not a big deal, but now it makes more sense to assign qp->sq_db_mmap_key and assign the response later on.

ok

> > -		mmap_entry_insert(dev, ucontext, qp,
> > -				  dev->db_bar_addr + resp->sq_db_offset,
> > -				  PAGE_SIZE, EFA_MMAP_IO_NC);
> > -	if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
> > +		rdma_user_mmap_entry_insert(&ucontext->ibucontext, qp,
> > +					    dev->db_bar_addr +
> > +					    resp->sq_db_offset,
> > +					    PAGE_SIZE, EFA_MMAP_IO_NC);
> > +	if (resp->sq_db_mmap_key == RDMA_USER_MMAP_INVALID)
> >  		return -ENOMEM;
> > -
> > +	qp->sq_db_mmap_key = resp->sq_db_mmap_key;
> >  	resp->sq_db_offset &= ~PAGE_MASK;
> >
> > +	address = dev->mem_bar_addr + resp->llq_desc_offset;
> > +	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
> > +			    (resp->llq_desc_offset & ~PAGE_MASK));
> >  	resp->llq_desc_mmap_key =
> > -		mmap_entry_insert(dev, ucontext, qp,
> > -				  dev->mem_bar_addr + resp->llq_desc_offset,
> > -				  PAGE_ALIGN(params->sq_ring_size_in_bytes +
> > -					     (resp->llq_desc_offset & ~PAGE_MASK)),
> > -				  EFA_MMAP_IO_WC);
> > -	if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
> > +		rdma_user_mmap_entry_insert(&ucontext->ibucontext, qp,
> > +					    address,
> > +					    length,
> > +					    EFA_MMAP_IO_WC);
> > +	if (resp->llq_desc_mmap_key == RDMA_USER_MMAP_INVALID)
> >  		return -ENOMEM;
> > -
> > +	qp->llq_desc_mmap_key = resp->llq_desc_mmap_key;
> >  	resp->llq_desc_offset &= ~PAGE_MASK;
> >
> >  	if (qp->rq_size) {
> > +		address = dev->db_bar_addr + resp->rq_db_offset;
> >  		resp->rq_db_mmap_key =
> > -			mmap_entry_insert(dev, ucontext, qp,
> > -					  dev->db_bar_addr + resp->rq_db_offset,
> > -					  PAGE_SIZE, EFA_MMAP_IO_NC);
> > -		if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
> > +			rdma_user_mmap_entry_insert(&ucontext->ibucontext, qp,
> > +						    address, PAGE_SIZE,
> > +						    EFA_MMAP_IO_NC);
> > +		if (resp->rq_db_mmap_key == RDMA_USER_MMAP_INVALID)
> >  			return -ENOMEM;
> > -
> > +		qp->rq_db_mmap_key = resp->rq_db_mmap_key;
> >  		resp->rq_db_offset &= ~PAGE_MASK;
> >
> > +		address = virt_to_phys(qp->rq_cpu_addr);
> >  		resp->rq_mmap_key =
> > -			mmap_entry_insert(dev, ucontext, qp,
> > -					  virt_to_phys(qp->rq_cpu_addr),
> > -					  qp->rq_size, EFA_MMAP_DMA_PAGE);
> > -		if (resp->rq_mmap_key == EFA_MMAP_INVALID)
> > +			rdma_user_mmap_entry_insert(&ucontext->ibucontext, qp,
> > +						    address, qp->rq_size,
> > +						    EFA_MMAP_DMA_PAGE);
> > +		if (resp->rq_mmap_key == RDMA_USER_MMAP_INVALID)
> >  			return -ENOMEM;
> > +		qp->rq_mmap_key = resp->rq_mmap_key;
> >
> >  		resp->rq_mmap_size = qp->rq_size;
> >  	}
> > @@ -775,6 +677,9 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
> >  					 DMA_TO_DEVICE);
> >  		if (!rq_entry_inserted)
>
> Now that we store the keys on the QP object we can remove the rq_entry_inserted variable and test for !qp->rq_mmap_key.

ok

> >  			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
> > +		else
> > +			rdma_user_mmap_entry_remove(&ucontext->ibucontext,
> > +						    qp->rq_mmap_key);
>
> Other entries need to be removed as well, otherwise the refcount won't reach zero. This error flow should now be similar to efa_destroy_qp. I think that means losing the free_pages_exact too.

Not sure I understand, how can we lose the free_pages_exact? If the entry wasn't inserted into the mmap_xa, what flow will free the pages? (A sketch of the error flow as I read it is at the bottom of this mail.)

> >  	}
> >  err_free_qp:
> >  	kfree(qp);
>
> Pretty much the same comments for the CQ parts as the QP.

ok
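To make that question concrete, here is a sketch of the efa_create_qp error flow with the comments above applied: the keys stored on the QP object replace rq_entry_inserted, and every previously inserted entry is removed, mirroring efa_destroy_qp. Two assumptions that are not in the patch: the err_free_mapped label name is illustrative only, and rdma_user_mmap_entry_remove() is taken to be a no-op when handed RDMA_USER_MMAP_INVALID (so the keys must be initialized to that value when the QP is allocated). The not-inserted branch keeps free_pages_exact(), which is exactly the point still open above:

err_free_mapped:	/* hypothetical label name */
	if (qp->rq_size) {
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
		if (qp->rq_mmap_key == RDMA_USER_MMAP_INVALID) {
			/* never inserted into the mmap_xa, so no mmap or
			 * dealloc_ucontext path will ever free these pages
			 */
			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
		} else {
			rdma_user_mmap_entry_remove(&ucontext->ibucontext,
						    qp->rq_mmap_key);
		}
		rdma_user_mmap_entry_remove(&ucontext->ibucontext,
					    qp->rq_db_mmap_key);
	}
	/* the SQ entries may have been inserted as well; drop them so their
	 * refcounts can reach zero
	 */
	rdma_user_mmap_entry_remove(&ucontext->ibucontext,
				    qp->llq_desc_mmap_key);
	rdma_user_mmap_entry_remove(&ucontext->ibucontext, qp->sq_db_mmap_key);
err_free_qp:
	kfree(qp);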