On 2024/4/11 11:38, Zhengchao Shao wrote:
> As described in the ib_map_mr_sg function comment, it returns the number
> of sg elements that were mapped to the memory region. However,
> hns_roce_map_mr_sg returns the number of pages required for mapping the
> DMA area. Fix it.
> 
> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
> Signed-off-by: Zhengchao Shao <shaozhengchao@xxxxxxxxxx>

Thanks,
Reviewed-by: Junxian Huang <huangjunxian6@xxxxxxxxxxxxx>

> ---
> v2: fix the return value and coding format issues
> ---
>  drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
>  1 file changed, 7 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
> index 9e05b57a2d67..80c050d7d0ea 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
> @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>  	struct ib_device *ibdev = &hr_dev->ib_dev;
>  	struct hns_roce_mr *mr = to_hr_mr(ibmr);
>  	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
> -	int ret = 0;
> +	int ret, sg_num = 0;
>  
>  	mr->npages = 0;
>  	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>  				 sizeof(dma_addr_t), GFP_KERNEL);
>  	if (!mr->page_list)
> -		return ret;
> +		return sg_num;
>  
> -	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> -	if (ret < 1) {
> +	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> +	if (sg_num < 1) {
>  		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
> -			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
> +			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
>  		goto err_page_list;
>  	}
>  
> @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>  	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>  	if (ret) {
>  		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
> -		ret = 0;
> +		sg_num = 0;
>  	} else {
>  		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
> -		ret = mr->npages;
>  	}
>  
>  err_page_list:
>  	kvfree(mr->page_list);
>  	mr->page_list = NULL;
>  
> -	return ret;
> +	return sg_num;
>  }
>  
>  static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
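
As a side note for anyone reading along (illustration only, not part of this
patch): the return-value convention matters because ULPs compare what
ib_map_mr_sg() returns against the number of SG entries they passed in. Below
is a minimal sketch of a hypothetical caller; the ulp_register_mr name and its
exact error handling are assumptions, not taken from any real ULP.

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical caller: treats anything less than sg_nents as a partial
 * mapping. If the driver returns a page count instead of the number of
 * mapped SG elements, this check can misjudge a perfectly good mapping.
 */
static int ulp_register_mr(struct ib_mr *mr, struct scatterlist *sg,
			   int sg_nents, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, page_size);
	if (n < 0)
		return n;		/* mapping failed outright */
	if (n < sg_nents)
		return -EINVAL;		/* only part of the SG list was mapped */

	return 0;			/* all sg_nents elements were mapped */
}

Since the page count reported by the old code could differ from sg_nents even
when every SG element was mapped, a check like the one above could draw the
wrong conclusion, which is what the patch addresses.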