6.5-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Leon Romanovsky <leonro@xxxxxxxxxx>

commit c99a7457e5bb873914a74307ba2df85f6799203b upstream.

During execution of mlx5_mkey_cache_cleanup(), there is a guarantee that
MRs are not registered and/or destroyed. It means that we don't need the
newly introduced cache disable flag.

Fixes: 374012b00457 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup")
Link: https://lore.kernel.org/r/c7e9c9f98c8ae4a7413d97d9349b29f5b0a23dbe.1695921626.git.leon@xxxxxxxxxx
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |    1 -
 drivers/infiniband/hw/mlx5/mr.c      |    5 -----
 2 files changed, 6 deletions(-)

--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -797,7 +797,6 @@ struct mlx5_mkey_cache {
 	struct dentry *fs_root;
 	unsigned long last_add;
 	struct delayed_work remove_ent_dwork;
-	u8 disable: 1;
 };
 
 struct mlx5_ib_port_resources {
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1026,7 +1026,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5
 		return;
 
 	mutex_lock(&dev->cache.rb_lock);
-	dev->cache.disable = true;
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
@@ -1824,10 +1823,6 @@ static int cache_ent_find_and_store(stru
 	}
 
 	mutex_lock(&cache->rb_lock);
-	if (cache->disable) {
-		mutex_unlock(&cache->rb_lock);
-		return 0;
-	}
 	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
 	if (ent) {
 		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
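
For context, here is a minimal user-space sketch of the ordering argument the
commit message relies on. This is not the mlx5 driver code; the names
(simple_cache, cache_store, cache_cleanup) are illustrative only. The point it
shows: when the caller guarantees that no store calls can run once cleanup
starts, the store path only needs its lock, and a separate "disable" flag on
the cache is redundant.

/*
 * Illustrative sketch only (not kernel code): a cache whose cleanup is
 * called after the caller has already stopped all store/destroy activity,
 * mirroring the guarantee described for mlx5_mkey_cache_cleanup().
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct simple_cache {
	pthread_mutex_t lock;
	int *entries;
	size_t nr;
	size_t cap;
};

/* Store path: serialized by the lock alone, no "disable" check needed. */
static int cache_store(struct simple_cache *c, int key)
{
	int ret = -1;

	pthread_mutex_lock(&c->lock);
	if (c->nr < c->cap) {
		c->entries[c->nr++] = key;
		ret = 0;
	}
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/*
 * Cleanup path: the caller guarantees no cache_store() runs concurrently
 * or afterwards (the analogue of "MRs are not registered and/or destroyed"
 * during cleanup), so there is nothing for a disable flag to guard against.
 */
static void cache_cleanup(struct simple_cache *c)
{
	pthread_mutex_lock(&c->lock);
	free(c->entries);
	c->entries = NULL;
	c->nr = 0;
	c->cap = 0;
	pthread_mutex_unlock(&c->lock);
	pthread_mutex_destroy(&c->lock);
}

int main(void)
{
	struct simple_cache c = { .cap = 4 };

	pthread_mutex_init(&c.lock, NULL);
	c.entries = calloc(c.cap, sizeof(*c.entries));
	if (!c.entries)
		return 1;

	cache_store(&c, 42);	/* all stores happen before cleanup */
	printf("cached %zu entries\n", c.nr);
	cache_cleanup(&c);	/* runs only after stores have ceased */
	return 0;
}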