> Subject: [PATCH for-rc 4/6] RDMA/efa: Use API to get contiguous memory blocks
> aligned to device supported page size
>
> Use the ib_umem_find_best_pgsz() and rdma_for_each_block() API when
> registering an MR instead of coding it in the driver.
>
> ib_umem_find_best_pgsz() is used to find the best suitable page size which
> replaces the existing efa_cont_pages() implementation.
> rdma_for_each_block() is used to iterate the umem in aligned contiguous memory
> blocks.
>
> Cc: Shiraz Saleem <shiraz.saleem@xxxxxxxxx>
> Reviewed-by: Firas JahJah <firasj@xxxxxxxxxx>
> Signed-off-by: Gal Pressman <galpress@xxxxxxxxxx>
> ---
>  drivers/infiniband/hw/efa/efa_verbs.c | 81 +++++----------------------
>  1 file changed, 14 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
> index 0640c2435f67..c1246c39f234 100644
> --- a/drivers/infiniband/hw/efa/efa_verbs.c
> +++ b/drivers/infiniband/hw/efa/efa_verbs.c
> @@ -1054,21 +1054,15 @@ static int umem_to_page_list(struct efa_dev *dev,
>                               u8 hp_shift)
>  {
>         u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
> -       struct sg_dma_page_iter sg_iter;
> -       unsigned int page_idx = 0;
> +       struct ib_block_iter biter;
>         unsigned int hp_idx = 0;
>
>         ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
>                   hp_cnt, pages_in_hp);
>
> -       for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
> -               if (page_idx % pages_in_hp == 0) {
> -                       page_list[hp_idx] = sg_page_iter_dma_address(&sg_iter);
> -                       hp_idx++;
> -               }
> -
> -               page_idx++;
> -       }
> +       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
> +                           BIT(hp_shift))
> +               page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
>
>         return 0;
>  }
> @@ -1402,56 +1396,6 @@ static int efa_create_pbl(struct efa_dev *dev,
>         return 0;
>  }
>
> -static void efa_cont_pages(struct ib_umem *umem, u64 addr,
> -                          unsigned long max_page_shift,
> -                          int *count, u8 *shift, u32 *ncont)
> -{
> -       struct scatterlist *sg;
> -       u64 base = ~0, p = 0;
> -       unsigned long tmp;
> -       unsigned long m;
> -       u64 len, pfn;
> -       int i = 0;
> -       int entry;
> -
> -       addr = addr >> PAGE_SHIFT;
> -       tmp = (unsigned long)addr;
> -       m = find_first_bit(&tmp, BITS_PER_LONG);
> -       if (max_page_shift)
> -               m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);
> -
> -       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
> -               len = DIV_ROUND_UP(sg_dma_len(sg), PAGE_SIZE);
> -               pfn = sg_dma_address(sg) >> PAGE_SHIFT;
> -               if (base + p != pfn) {
> -                       /*
> -                        * If either the offset or the new
> -                        * base are unaligned update m
> -                        */
> -                       tmp = (unsigned long)(pfn | p);
> -                       if (!IS_ALIGNED(tmp, 1 << m))
> -                               m = find_first_bit(&tmp, BITS_PER_LONG);
> -
> -                       base = pfn;
> -                       p = 0;
> -               }
> -
> -               p += len;
> -               i += len;
> -       }
> -
> -       if (i) {
> -               m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
> -               *ncont = DIV_ROUND_UP(i, (1 << m));
> -       } else {
> -               m = 0;
> -               *ncont = 0;
> -       }
> -
> -       *shift = PAGE_SHIFT + m;
> -       *count = i;
> -}
> -

Leon - perhaps mlx5_ib_cont_pages() can also be replaced with the new core helper?

Reviewed-by: Shiraz Saleem <shiraz.saleem@xxxxxxxxx>
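
For reference, this is roughly the pattern the core helpers give you; just an
illustrative sketch, not the actual mlx5 conversion. The function name
example_fill_page_list() and the SZ_4K | SZ_2M capability mask are made up
for the example; a real driver would pass its own supported page-size bitmap:

    /*
     * Illustrative sketch only -- not taken from any driver.  Pick the
     * largest page size the umem layout allows, then walk the umem in
     * blocks of that size and record one DMA address per block.
     */
    #include <linux/errno.h>
    #include <linux/log2.h>
    #include <linux/sizes.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    static int example_fill_page_list(struct ib_umem *umem, u64 iova,
                                      u64 *page_list, unsigned int max_pages,
                                      unsigned int *npages, u8 *page_shift)
    {
            struct ib_block_iter biter;
            unsigned long pg_sz;
            unsigned int i = 0;

            /* Hypothetical capability mask; use the device's real one. */
            pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, iova);
            if (!pg_sz)
                    return -EINVAL;

            /* One DMA address per pg_sz-aligned block of the umem. */
            rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz) {
                    if (i >= max_pages)
                            return -ENOMEM;
                    page_list[i++] = rdma_block_iter_dma_address(&biter);
            }

            *npages = i;
            *page_shift = order_base_2(pg_sz);
            return 0;
    }

The hand-rolled "find the largest aligned block size" scan that
efa_cont_pages() did (and that mlx5_ib_cont_pages() presumably still does)
collapses into the two core calls above, which is what this patch does for efa.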