Currently the lookup of an object from its index and taking a reference
to the object are incorrectly protected by rcu_read_lock(), but this
does not make the xa_load() and kref_get() combination an atomic
operation.

The various write operations need to share the xarray state in a
mixture of user, soft irq and hard irq contexts, so the xarray locking
must support that. This patch replaces the xarray locks with
xa_lock_irqsave()/xa_unlock_irqrestore().

Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by xarrays")
Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6215c6de3a84..f2b586249793 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -119,8 +119,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
 				bool sleepable)
 {
-	int err;
+	struct xarray *xa = &pool->xa;
+	unsigned long flags;
 	gfp_t gfp_flags;
+	int err;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto err_cnt;
@@ -138,8 +140,10 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
 
 	if (sleepable)
 		might_sleep();
-	err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+	xa_lock_irqsave(xa, flags);
+	err = __xa_alloc_cyclic(xa, &elem->index, NULL, pool->limit,
 			      &pool->next, gfp_flags);
+	xa_unlock_irqrestore(xa, flags);
 	if (err < 0)
 		goto err_cnt;
 
@@ -154,15 +158,16 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
 	struct rxe_pool_elem *elem;
 	struct xarray *xa = &pool->xa;
+	unsigned long flags;
 	void *obj;
 
-	rcu_read_lock();
+	xa_lock_irqsave(xa, flags);
 	elem = xa_load(xa, index);
 	if (elem && kref_get_unless_zero(&elem->ref_cnt))
 		obj = elem->obj;
 	else
 		obj = NULL;
-	rcu_read_unlock();
+	xa_unlock_irqrestore(xa, flags);
 
 	return obj;
 }
@@ -179,6 +184,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
 	struct rxe_pool *pool = elem->pool;
 	struct xarray *xa = &pool->xa;
 	static int timeout = RXE_POOL_TIMEOUT;
+	unsigned long flags;
 	int ret, err = 0;
 	void *xa_ret;
 
@@ -188,7 +194,9 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
 	/* erase xarray entry to prevent looking up
 	 * the pool elem from its index
 	 */
-	xa_ret = xa_erase(xa, elem->index);
+	xa_lock_irqsave(xa, flags);
+	xa_ret = __xa_erase(xa, elem->index);
+	xa_unlock_irqrestore(xa, flags);
 	WARN_ON(xa_err(xa_ret));
 
 	/* if this is the last call to rxe_put complete the
@@ -249,8 +257,12 @@ int __rxe_put(struct rxe_pool_elem *elem)
 
 void __rxe_finalize(struct rxe_pool_elem *elem)
 {
+	struct xarray *xa = &elem->pool->xa;
+	unsigned long flags;
 	void *xa_ret;
 
-	xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+	xa_lock_irqsave(xa, flags);
+	xa_ret = __xa_store(xa, elem->index, elem, GFP_KERNEL);
+	xa_unlock_irqrestore(xa, flags);
 	WARN_ON(xa_err(xa_ret));
 }

base-commit: 5f004bcaee4cb552cf1b46a505f18f08777db7e5
-- 
2.39.2
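
As an aside (not part of the patch to apply): a minimal sketch of the
lookup pattern the commit message describes, where the xarray spinlock,
in its irqsave variant, makes the xa_load()/kref_get_unless_zero() pair
atomic with respect to a concurrent erase. The struct my_obj type, the
my_xa xarray and the my_obj_get() helper are hypothetical names used
only for illustration.

#include <linux/kref.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct my_obj {
	struct kref ref_cnt;
	u32 index;
};

/* hypothetical xarray holding my_obj pointers indexed by obj->index */
static DEFINE_XARRAY_ALLOC(my_xa);

static struct my_obj *my_obj_get(u32 index)
{
	struct my_obj *obj;
	unsigned long flags;

	/* The irqsave variant is used because writers may take the lock
	 * from user, soft irq and hard irq contexts.
	 */
	xa_lock_irqsave(&my_xa, flags);
	obj = xa_load(&my_xa, index);
	if (obj && !kref_get_unless_zero(&obj->ref_cnt))
		obj = NULL;	/* entry is being torn down; treat as not found */
	xa_unlock_irqrestore(&my_xa, flags);

	return obj;	/* caller must kref_put() the reference when done */
}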