When destroying a DMA mmapped object, there is no need to delay freeing
the pages until dealloc_ucontext, as the kernel itself keeps a reference
count on these pages.
Remove the special handling of DMA pages and call free_pages_exact on
destroy_qp/cq.

Reviewed-by: Firas JahJah <firasj@xxxxxxxxxx>
Reviewed-by: Yossi Leybovich <sleybo@xxxxxxxxxx>
Signed-off-by: Gal Pressman <galpress@xxxxxxxxxx>
---
 drivers/infiniband/hw/efa/efa_verbs.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 9a6cff718c49..fd2fc60d569f 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -413,6 +413,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 			  &qp->rq_dma_addr);
 		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
 				 DMA_TO_DEVICE);
+		free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
 	}
 
 	efa_qp_user_mmap_entries_remove(qp);
@@ -723,9 +724,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 	if (qp->rq_size) {
 		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
 				 DMA_TO_DEVICE);
-
-		if (!qp->rq_mmap_entry)
-			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
+		free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
 	}
 err_free_qp:
 	kfree(qp);
@@ -848,6 +847,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	efa_destroy_cq_idx(dev, cq->cq_idx);
 	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
 			 DMA_FROM_DEVICE);
+	free_pages_exact(cq->cpu_addr, cq->size);
 
 	rdma_user_mmap_entry_remove(cq->mmap_entry);
 }
@@ -987,8 +987,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 err_free_mapped:
 	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
 			 DMA_FROM_DEVICE);
-	if (!cq->mmap_entry)
-		free_pages_exact(cq->cpu_addr, cq->size);
+	free_pages_exact(cq->cpu_addr, cq->size);
 
 err_out:
 	atomic64_inc(&dev->stats.sw_stats.create_cq_err);
@@ -1549,10 +1548,6 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
 {
 	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
 
-	/* DMA mapping is already gone, now free the pages */
-	if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
-		free_pages_exact(phys_to_virt(entry->address),
-				 entry->rdma_entry.npages * PAGE_SIZE);
 	kfree(entry);
 }
 
-- 
2.24.1
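
For context, a minimal sketch (illustration only, not part of the patch; the
function name refcount_sketch is hypothetical) of the page refcount behaviour
the commit message relies on: once the buffer is mmapped to userspace, each
mapped page holds an extra reference, so the free_pages_exact() call at
destroy time only drops the driver's reference, and such pages are not
returned to the page allocator until the user mapping is torn down.

#include <linux/gfp.h>
#include <linux/mm.h>

static void refcount_sketch(void)
{
	size_t size = 4 * PAGE_SIZE;
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	struct page *first;

	if (!buf)
		return;

	/* Stand-in for what the mmap path does: take a reference on a
	 * page that is mapped into userspace (vm_insert_page() takes
	 * such a reference for real mappings).
	 */
	first = virt_to_page(buf);
	get_page(first);

	/* What destroy_qp()/destroy_cq() now do unconditionally: drop
	 * the driver's reference. Pages still holding a user reference
	 * (here, "first") are not recycled yet; the others are freed.
	 */
	free_pages_exact(buf, size);

	/* When the last reference is dropped (for real, when the user
	 * mapping is unmapped), the page is finally freed.
	 */
	put_page(first);
}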