> On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@xxxxxxxxx> wrote:
> 
> On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@xxxxxxxxx> wrote:
>> 
>> In rxe_mr_init_user() in rxe_mr.c, at the third error path the driver
>> fails to free the memory at mr->map. This patch adds code to do that.
>> This error only occurs if page_address() fails to return a non-zero
>> address, which should never happen on 64-bit architectures.
> 
> If this can never happen on 64-bit architectures, is it possible to
> exclude 64-bit architectures with some macros or similar?
> 
> Thanks,
> 
> Zhu Yanjun
> 
>> 
>> Fixes: 8700e3e7c485 ("Soft RoCE driver")
>> Reported-by: Haakon Bugge <haakon.bugge@xxxxxxxxxx>
>> Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
>> ---
>>  drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>>  1 file changed, 24 insertions(+), 17 deletions(-)
>> 
>> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
>> index 6aabcb4de235..f49baff9ca3d 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
>> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>>  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>  		     int access, struct rxe_mr *mr)
>>  {
>> -	struct rxe_map		**map;
>> -	struct rxe_phys_buf	*buf = NULL;
>> -	struct ib_umem		*umem;
>> -	struct sg_page_iter	sg_iter;
>> -	int			num_buf;
>> -	void			*vaddr;
>> +	struct rxe_map **map;
>> +	struct rxe_phys_buf *buf = NULL;
>> +	struct ib_umem *umem;
>> +	struct sg_page_iter sg_iter;
>> +	int num_buf;
>> +	void *vaddr;

This white-space stripping must be another issue, not related to the
memleak?

>>  	int err;
>> +	int i;
>> 
>>  	umem = ib_umem_get(pd->ibpd.device, start, length, access);
>>  	if (IS_ERR(umem)) {
>> -		pr_warn("err %d from rxe_umem_get\n",
>> -			(int)PTR_ERR(umem));
>> +		pr_warn("%s: Unable to pin memory region err = %d\n",
>> +			__func__, (int)PTR_ERR(umem));
>>  		err = PTR_ERR(umem);
>> -		goto err1;
>> +		goto err_out;
>>  	}
>> 
>>  	mr->umem = umem;
>> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>  	err = rxe_mr_alloc(mr, num_buf);
>>  	if (err) {
>> -		pr_warn("err %d from rxe_mr_alloc\n", err);
>> -		ib_umem_release(umem);
>> -		goto err1;
>> +		pr_warn("%s: Unable to allocate memory for map\n",
>> +			__func__);
>> +		goto err_release_umem;
>>  	}
>> 
>>  	mr->page_shift = PAGE_SHIFT;
>>  	mr->page_mask = PAGE_SIZE - 1;
>> 
>> -	num_buf			= 0;
>> +	num_buf = 0;

White-space change.

Otherwise:

Reviewed-by: Håkon Bugge <haakon.bugge@xxxxxxxxxx>


Thxs, Håkon

>>  	map = mr->map;
>>  	if (length > 0) {
>>  		buf = map[0]->buf;
>> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>  			vaddr = page_address(sg_page_iter_page(&sg_iter));
>>  			if (!vaddr) {
>> -				pr_warn("null vaddr\n");
>> -				ib_umem_release(umem);
>> +				pr_warn("%s: Unable to get virtual address\n",
>> +					__func__);
>>  				err = -ENOMEM;
>> -				goto err1;
>> +				goto err_cleanup_map;
>>  			}
>> 
>>  			buf->addr = (uintptr_t)vaddr;
>> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>  	return 0;
>> 
>> -err1:
>> +err_cleanup_map:
>> +	for (i = 0; i < mr->num_map; i++)
>> +		kfree(mr->map[i]);
>> +	kfree(mr->map);
>> +err_release_umem:
>> +	ib_umem_release(umem);
>> +err_out:
>>  	return err;
>>  }
>> 
>> -- 
>> 2.30.2
>> 
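
For what it's worth, a minimal sketch (untested, and not part of Bob's
patch) of what Zhu's macro suggestion could look like. It assumes that
page_address() can return NULL only for an unmapped highmem page, and
highmem does not exist on 64-bit builds, so the branch can be compiled
out there without any #ifdef blocks:

	vaddr = page_address(sg_page_iter_page(&sg_iter));
	/*
	 * IS_ENABLED() folds to a compile-time constant, so the
	 * compiler discards this branch entirely when CONFIG_HIGHMEM
	 * is not set, i.e. on all 64-bit architectures, while 32-bit
	 * highmem configs keep the check.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM) && !vaddr) {
		pr_warn("%s: Unable to get virtual address\n",
			__func__);
		err = -ENOMEM;
		goto err_cleanup_map;
	}

This keeps a single source path; whether eliding one well-predicted
branch is worth the extra subtlety is a judgment call.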