On Fri, Mar 06, 2020 at 10:44:53AM +0800, Weihang Li wrote:
> From: Xi Wang <wangxi11@xxxxxxxxxx>
>
> Currently, before the qp is created, a page size needs to be calculated for
> the base address table to store all base addresses in the mtr. As a result,
> the parameter configuration of the mtr is complex. So integrate the process
> of calculating the base table page size into the hem related interface to
> simplify the process of using mtr.
>
> Signed-off-by: Xi Wang <wangxi11@xxxxxxxxxx>
> Signed-off-by: Weihang Li <liweihang@xxxxxxxxxx>
> ---
>  drivers/infiniband/hw/hns/hns_roce_device.h |  4 ---
>  drivers/infiniband/hw/hns/hns_roce_hem.c    | 16 +++++++----
>  drivers/infiniband/hw/hns/hns_roce_qp.c     | 42 +++++++----------------------
>  3 files changed, 21 insertions(+), 41 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
> index b6ae12d..f6b3cf6 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_device.h
> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
> @@ -669,10 +669,6 @@ struct hns_roce_qp {
>          struct ib_umem *umem;
>          struct hns_roce_mtt mtt;
>          struct hns_roce_mtr mtr;
> -
> -        /* this define must less than HNS_ROCE_MAX_BT_REGION */
> -#define HNS_ROCE_WQE_REGION_MAX 3
> -        struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
>          int wqe_bt_pg_shift;
>
>          u32 buff_size;
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
> index e822157..8380d71 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_hem.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
> @@ -1383,6 +1383,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
>          void *cpu_base;
>          u64 phy_base;
>          int ret = 0;
> +        int ba_num;
>          int offset;
>          int total;
>          int step;
> @@ -1393,12 +1394,16 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
>          if (root_hem)
>                  return 0;
>
> +        ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
> +        if (ba_num < 1)
> +                return -ENOMEM;
> +
>          INIT_LIST_HEAD(&temp_root);
> -        total = r->offset;
> +        offset = r->offset;
>          /* indicate to last region */
>          r = &regions[region_cnt - 1];
> -        root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
> -                                       unit, true, 0);
> +        root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
> +                                       ba_num, true, 0);
>          if (!root_hem)
>                  return -ENOMEM;
>          list_add(&root_hem->list, &temp_root);
> @@ -1410,7 +1415,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
>                  INIT_LIST_HEAD(&temp_list[i]);
>
>          total = 0;
> -        for (i = 0; i < region_cnt && total < unit; i++) {
> +        for (i = 0; i < region_cnt && total < ba_num; i++) {
>                  r = &regions[i];
>                  if (!r->count)
>                          continue;
> @@ -1443,7 +1448,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
>                          /* if exist mid bt, link L1 to L0 */
>                          list_for_each_entry_safe(hem, temp_hem,
>                                                   &hem_list->mid_bt[i][1], list) {
> -                                offset = hem->start / step * BA_BYTE_LEN;
> +                                offset = (hem->start - r->offset) / step *
> +                                         BA_BYTE_LEN;
>                                  hem_list_link_bt(hr_dev, cpu_base + offset,
>                                                   hem->dma_addr);
>                                  total++;
> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
> index c2ea489..1c5de2b 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_qp.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
> @@ -579,30 +579,6 @@ static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
>          return region_cnt;
>  }
>
> -static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
> -                                  struct hns_roce_buf_region *regions,
> -                                  int region_cnt)
> -{
> -        int bt_pg_shift;
> -        int ba_num;
> -        int ret;
> -
> -        bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;
> -
> -        /* all root ba entries must in one bt page */
> -        do {
> -                ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
> -                ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
> -                                                     ba_num);
> -                if (ret <= ba_num)
> -                        break;
> -
> -                bt_pg_shift++;
> -        } while (ret > ba_num);
> -
> -        return bt_pg_shift - PAGE_SHIFT;
> -}
> -
>  static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
>                                  struct hns_roce_qp *hr_qp)
>  {
> @@ -768,7 +744,10 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
>  static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
>                         u32 page_shift, bool is_user)
>  {
> -        dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
> +/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */
> +#define HNS_ROCE_WQE_REGION_MAX 3
> +        struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
> +        dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = { NULL };

Nitpick: the NULL is not needed, an empty initializer already zero-fills the
array (see the quick sketch below).

Thanks
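
For illustration only, not part of the patch: a minimal standalone sketch of
the point above. It reuses the HNS_ROCE_WQE_REGION_MAX count of 3 from the
patch and substitutes void * for dma_addr_t * so it compiles in userspace;
the empty "= {}" form is the GNU extension the patch already uses for
"regions", with "= { 0 }" being the strictly standard spelling.

#include <assert.h>
#include <stddef.h>

#define HNS_ROCE_WQE_REGION_MAX 3        /* same region count as the patch */

int main(void)
{
        /* explicit NULL for the first element; the rest are zeroed anyway */
        void *with_null[HNS_ROCE_WQE_REGION_MAX] = { NULL };
        /* empty initializer list: every element is zeroed */
        void *without_null[HNS_ROCE_WQE_REGION_MAX] = {};
        int i;

        for (i = 0; i < HNS_ROCE_WQE_REGION_MAX; i++)
                assert(with_null[i] == NULL && without_null[i] == NULL);

        return 0;
}

Either way buf_list starts out fully NULL, so dropping the explicit NULL
keeps the behaviour and matches the "= {}" style used for regions right
above it.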