On Mon, Feb 11, 2019 at 8:56 PM Shiraz Saleem <shiraz.saleem@xxxxxxxxx> wrote:
>
> From: "Shiraz, Saleem" <shiraz.saleem@xxxxxxxxx>
>
> Use the for_each_sg_dma_page iterator variant to walk the umem
> DMA-mapped SGL and get the page DMA address. This avoids the extra
> loop to iterate pages in the SGE when the for_each_sg iterator is used.
>
> Additionally, purge umem->page_shift usage in the driver,
> as it's only relevant for ODP MRs. Use the system page size and
> shift instead.
>
> Cc: Selvin Xavier <selvin.xavier@xxxxxxxxxxxx>
> Cc: Devesh Sharma <devesh.sharma@xxxxxxxxxxxx>
> Signed-off-by: Shiraz, Saleem <shiraz.saleem@xxxxxxxxx>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c  | 23 +++++++++--------------
>  drivers/infiniband/hw/bnxt_re/qplib_res.c |  9 +++++----
>  2 files changed, 14 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index 1d7469e..ed7cb0b 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -3566,19 +3566,14 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
>         u64 *pbl_tbl = pbl_tbl_orig;
>         u64 paddr;
>         u64 page_mask = (1ULL << page_shift) - 1;
> -       int i, pages;
> -       struct scatterlist *sg;
> -       int entry;
> -
> -       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
> -               pages = sg_dma_len(sg) >> PAGE_SHIFT;
> -               for (i = 0; i < pages; i++) {
> -                       paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
> -                       if (pbl_tbl == pbl_tbl_orig)
> -                               *pbl_tbl++ = paddr & ~page_mask;
> -                       else if ((paddr & page_mask) == 0)
> -                               *pbl_tbl++ = paddr;
> -               }
> +       struct sg_dma_page_iter sg_iter;
> +
> +       for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
> +               paddr = sg_page_iter_dma_address(&sg_iter);
> +               if (pbl_tbl == pbl_tbl_orig)
> +                       *pbl_tbl++ = paddr & ~page_mask;
> +               else if ((paddr & page_mask) == 0)
> +                       *pbl_tbl++ = paddr;
>         }
>         return pbl_tbl - pbl_tbl_orig;
>  }
> @@ -3641,7 +3636,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
>                 goto free_umem;
>         }
>
> -       page_shift = umem->page_shift;
> +       page_shift = PAGE_SHIFT;

I fear this will break Hugepage support also! A rough sketch of what I
mean is at the bottom of this mail.

>
>         if (!bnxt_re_page_size_ok(page_shift)) {
>                 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
> diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
> index c8502c2..c8478c1 100644
> --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
> +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
> @@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
>  static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
>                        struct scatterlist *sghead, u32 pages, u32 pg_size)
>  {
> -       struct scatterlist *sg;
> +       struct sg_dma_page_iter sg_iter;
>         bool is_umem = false;
>         int i;
>
> @@ -116,12 +116,13 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
>         } else {
>                 i = 0;
>                 is_umem = true;
> -               for_each_sg(sghead, sg, pages, i) {
> -                       pbl->pg_map_arr[i] = sg_dma_address(sg);
> -                       pbl->pg_arr[i] = sg_virt(sg);
> +               for_each_sg_dma_page(sghead, &sg_iter, pages, 0) {
> +                       pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
> +                       pbl->pg_arr[i] = NULL;
>                         if (!pbl->pg_arr[i])
>                                 goto fail;
>
> +                       i++;
>                         pbl->pg_count++;
>                 }
>         }
> --
> 1.8.3.1
>
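To illustrate the Hugepage concern above: as far as I can tell, with
page_shift hard-coded to PAGE_SHIFT, an MR backed by 2M hugepages ends
up with one PBL entry per 4K page rather than one per 2M page. Below is
a rough, hypothetical sketch in plain C (not bnxt_re code; the helper
name, the pgsz_bitmap argument and the simplified alignment check are
all made up for illustration) of how a driver could derive the shift
from the MR geometry instead of hard-coding it:

#include <stdint.h>

/*
 * Hypothetical helper, not bnxt_re code: pick the largest page shift
 * that the HW supports (pgsz_bitmap: bit N set means 2^N byte pages
 * are supported) and that the MR start/length are aligned to.
 * Deliberately simplified; a real implementation would also have to
 * check the DMA address of every SGE against the chosen size.
 */
static unsigned int pick_mr_page_shift(uint64_t virt_addr, uint64_t length,
                                       uint64_t pgsz_bitmap)
{
        unsigned int shift;

        /* Walk from the largest candidate size down to 4K. */
        for (shift = 63; shift >= 12; shift--) {
                uint64_t pgsz = 1ULL << shift;

                if (!(pgsz_bitmap & pgsz))
                        continue;
                /* Start and length must both be aligned to this size. */
                if (!(virt_addr & (pgsz - 1)) && !(length & (pgsz - 1)))
                        return shift;
        }
        return 12;      /* fall back to 4K pages */
}

With a bitmap of (1ULL << 12) | (1ULL << 21), a 2M-aligned MR whose
length is a 2M multiple would get shift 21 and a PBL entry per 2M page;
everything else falls back to 12. Treat it purely as a sketch of the
idea, not a drop-in fix for this patch.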