在 2022/4/14 21:52, Jason Gunthorpe 写道:
On Thu, Apr 14, 2022 at 09:01:29PM +0800, Yanjun Zhu wrote:
Still no, this does almost every allocation - only AH with the
non-blocking flag set should use this path.
To the function ib_send_cm_req, the call chain is as below.
ib_send_cm_req --> cm_alloc_priv_msg --> cm_alloc_msg --> rdma_create_ah -->
_rdma_create_ah --> rxe_create_ah --> rxe_av_chk_attr -->__rxe_add_to_pool
As such, xa_lock_irqsave/irqrestore is selected.
As I keep saying, only the rxe_create_ah() with the non-blocking flag
set should use the GFP_ATOMIC. All other paths must use GFP_KERNEL.
Got it. The GFP_ATOMIC/GFP_KERNEL are used in different paths.
rxe_create_ah will use GFP_ATOMIC and others will use GFP_KERNEL.
So the codes should be as below:
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+		      gfp_t gfp)
{
int err;
+ unsigned long flags;
if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
return -EINVAL;
@@ -168,10 +170,17 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
- xa_lock_irq(&pool->xa);
- err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
- &pool->next, GFP_ATOMIC);
- xa_unlock_irq(&pool->xa);
+	if (gfp == GFP_ATOMIC) { /* for rxe_create_ah */
+		xa_lock_irqsave(&pool->xa, flags);
+		err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem,
+					pool->limit, &pool->next, GFP_ATOMIC);
+		xa_unlock_irqrestore(&pool->xa, flags);
+	} else if (gfp == GFP_KERNEL) {
+		xa_lock_irq(&pool->xa);
+		err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem,
+					pool->limit, &pool->next, GFP_KERNEL);
+		xa_unlock_irq(&pool->xa);
+	}
if (err)
goto err_cnt;
Please comment.
Zhu Yanjun
Jason