From: Zhu Yanjun <yanjun.zhu@xxxxxxxxx>
This is a deadlock problem.

The ah_pool xa_lock is first acquired in process context here:
{SOFTIRQ-ON-W} state was registered at:
lock_acquire+0x1d2/0x5a0
_raw_spin_lock+0x33/0x80
__rxe_add_to_pool+0x183/0x230 [rdma_rxe]
Then the same ah_pool xa_lock is acquired in softirq context:
{IN-SOFTIRQ-W}:
Call Trace:
<TASK>
dump_stack_lvl+0x44/0x57
mark_lock.part.52.cold.79+0x3c/0x46
__lock_acquire+0x1565/0x34a0
lock_acquire+0x1d2/0x5a0
_raw_spin_lock_irqsave+0x42/0x90
rxe_pool_get_index+0x72/0x1d0 [rdma_rxe]
rxe_get_av+0x168/0x2a0 [rdma_rxe]
</TASK>
From the above, the function __rxe_add_to_pool acquires the xa_lock
and is then interrupted by a softirq. The softirq path calls
rxe_pool_get_index, which tries to acquire the same xa_lock, and the
deadlock appears:
       CPU0
       ----
  lock(&xa->xa_lock#15);    <----- __rxe_add_to_pool
  <Interrupt>
    lock(&xa->xa_lock#15);  <----- rxe_pool_get_index

  *** DEADLOCK ***
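The fix takes the pool's xa_lock with interrupts disabled, so a softirq
on the same CPU cannot interrupt the lock holder and try to re-acquire
the lock. A minimal sketch of that pattern follows (illustration only,
not part of the diff below; the function name example_insert and the
xa_limit_32b range are assumptions):

#include <linux/xarray.h>

static int example_insert(struct xarray *xa, u32 *id, void *entry)
{
	unsigned long flags;
	int err;

	/* Disable interrupts so a softirq cannot re-enter on this CPU */
	xa_lock_irqsave(xa, flags);
	/* The lock is already held, so use the atomic allocation variant */
	err = __xa_alloc(xa, id, entry, xa_limit_32b, GFP_ATOMIC);
	xa_unlock_irqrestore(xa, flags);

	return err;
}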
Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by carrays")
Reported-and-tested-by: Yi Zhang <yi.zhang@xxxxxxxxxx>
Signed-off-by: Zhu Yanjun <yanjun.zhu@xxxxxxxxx>
---
V5->V6: One deadlock fix per commit
V4->V5: Commit logs are changed.
V3->V4: xa_lock_irq locks are used.
V2->V3: The allocation in __rxe_add_to_pool happens between spin_lock
and spin_unlock, so GFP_ATOMIC is used there.
V1->V2: Replace GFP_KERNEL with GFP_ATOMIC
---
drivers/infiniband/sw/rxe/rxe_pool.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 87066d04ed18..67f1d4733682 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -106,7 +106,7 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
atomic_set(&pool->num_elem, 0);
- xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
pool->limit.min = info->min_index;
pool->limit.max = info->max_index;
}
@@ -155,6 +155,7 @@ void *rxe_alloc(struct rxe_pool *pool)
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
int err;
+ unsigned long flags;
if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
return -EINVAL;
@@ -166,8 +167,10 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
- &pool->next, GFP_KERNEL);
+ xa_lock_irqsave(&pool->xa, flags);
+ err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ &pool->next, GFP_ATOMIC);
+ xa_unlock_irqrestore(&pool->xa, flags);
if (err)
goto err_cnt;
@@ -201,7 +204,7 @@ static void rxe_elem_release(struct kref *kref)
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
struct rxe_pool *pool = elem->pool;
- xa_erase(&pool->xa, elem->index);
+ xa_erase_irq(&pool->xa, elem->index);
if (pool->cleanup)
pool->cleanup(elem);