From: Aharon Landau <aharonl@xxxxxxxxxx>

Keep track of the mkey size of all cacheable mkeys, making it possible
to rereg them.

Signed-off-by: Aharon Landau <aharonl@xxxxxxxxxx>
Reviewed-by: Michael Guralnik <michaelgur@xxxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/mr.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b8529f73b306..ea8634cafa9c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -709,6 +709,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, u8 access_mode,
 			kfree(mr);
 			return ERR_PTR(err);
 		}
+		mr->mmkey.ndescs = ndescs;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	init_waitqueue_head(&mr->mmkey.wait);
@@ -1372,9 +1373,6 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
 
-	/* We only track the allocated sizes of MRs from the cache */
-	if (!mr->mmkey.cache_ent)
-		return false;
 	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
 		return false;
 
@@ -1382,8 +1380,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
 	if (WARN_ON(!*page_size))
 		return false;
-	return (1ULL << mr->mmkey.cache_ent->order) >=
-	       ib_umem_num_dma_blocks(new_umem, *page_size);
+	return mr->mmkey.ndescs >= ib_umem_num_dma_blocks(new_umem, *page_size);
 }
 
 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
--
2.17.2
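
As a side note, here is a minimal standalone sketch of the capacity check
that can_use_umr_rereg_pas() switches to. All names are illustrative
stand-ins rather than the driver's, and the real ib_umem_num_dma_blocks()
also accounts for the iova offset within the first block, which this
simplified version ignores.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for ib_umem_num_dma_blocks(): how many
 * page_size blocks are needed to map a buffer of this length. */
static size_t num_dma_blocks(size_t length, size_t page_size)
{
	return (length + page_size - 1) / page_size;
}

/* The check after this patch: an mkey created with ndescs translation
 * entries can absorb a new mapping only if that mapping needs no more
 * blocks than were allocated when the mkey was created. */
static bool fits_existing_mkey(size_t ndescs, size_t length,
			       size_t page_size)
{
	return ndescs >= num_dma_blocks(length, page_size);
}

int main(void)
{
	/* An mkey sized for 512 entries takes a 2 MiB mapping at 4 KiB
	 * pages (exactly 512 blocks), but not one byte more. */
	printf("%d\n", fits_existing_mkey(512, 2 << 20, 4096));       /* 1 */
	printf("%d\n", fits_existing_mkey(512, (2 << 20) + 1, 4096)); /* 0 */
	return 0;
}

Since the bound now comes from mr->mmkey.ndescs rather than from
1ULL << mr->mmkey.cache_ent->order, the early bail-out for MRs without a
cache entry is no longer needed, which is why the patch can drop it.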