From: Leon Romanovsky <leonro@xxxxxxxxxx>

During execution of mlx5_mkey_cache_cleanup(), there is a guarantee
that no MRs are being registered and/or destroyed. This means that the
recently introduced cache disable flag is not needed.

Fixes: 374012b00457 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup")
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 -
 drivers/infiniband/hw/mlx5/mr.c      | 5 -----
 2 files changed, 6 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d1ff98aad162..16713baf0d06 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -798,7 +798,6 @@ struct mlx5_mkey_cache {
 	struct dentry *fs_root;
 	unsigned long last_add;
 	struct delayed_work remove_ent_dwork;
-	u8 disable: 1;
 };
 
 struct mlx5_ib_port_resources {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 433f96459246..8a3762d9ff58 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1026,7 +1026,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 		return;
 
 	mutex_lock(&dev->cache.rb_lock);
-	dev->cache.disable = true;
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
@@ -1830,10 +1829,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	}
 
 	mutex_lock(&cache->rb_lock);
-	if (cache->disable) {
-		mutex_unlock(&cache->rb_lock);
-		return 0;
-	}
 	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
 	if (ent) {
 		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
-- 
2.41.0
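
A note for readers unfamiliar with this code path: below is a minimal
userspace sketch (my own illustration, not mlx5 code) of the pattern
the patch removes. A mutex-protected cache sets a disable flag in its
cleanup path so that any concurrent insertion bails out. All names
here (demo_cache, demo_insert, demo_cleanup) are hypothetical. The
patch's argument is that by the time cleanup runs, nothing can still
be executing the insertion path (all MRs are already deregistered),
so the flag never changes behavior and can be dropped.

#include <pthread.h>
#include <stdbool.h>

struct demo_cache {
	pthread_mutex_t lock;	/* plays the role of cache->rb_lock */
	int nelems;
	bool disable;		/* analogous to the removed 'u8 disable: 1' */
};

/* Insertion path, analogous to cache_ent_find_and_store(). */
static int demo_insert(struct demo_cache *cache)
{
	pthread_mutex_lock(&cache->lock);
	if (cache->disable) {	/* the check the patch deletes */
		pthread_mutex_unlock(&cache->lock);
		return 0;
	}
	cache->nelems++;
	pthread_mutex_unlock(&cache->lock);
	return 0;
}

/* Cleanup path, analogous to mlx5_mkey_cache_cleanup(). */
static void demo_cleanup(struct demo_cache *cache)
{
	pthread_mutex_lock(&cache->lock);
	cache->disable = true;	/* the store the patch deletes */
	cache->nelems = 0;
	pthread_mutex_unlock(&cache->lock);
}

int main(void)
{
	struct demo_cache cache = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Caller-enforced ordering: all insertions finish first ... */
	demo_insert(&cache);
	/* ... and only then does cleanup run, so 'disable' is dead code. */
	demo_cleanup(&cache);
	return 0;
}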