On Tue, Apr 25, 2023 at 11:18:42AM -0700, Stefan Roesch wrote: > +void __io_napi_add(struct io_ring_ctx *ctx, struct file *file) > +{ > + unsigned int napi_id; > + struct socket *sock; > + struct sock *sk; > + struct io_napi_ht_entry *he; > + > + sock = sock_from_file(file); > + if (!sock) > + return; > + > + sk = sock->sk; > + if (!sk) > + return; > + > + napi_id = READ_ONCE(sk->sk_napi_id); > + > + /* Non-NAPI IDs can be rejected. */ > + if (napi_id < MIN_NAPI_ID) > + return; > + > + spin_lock(&ctx->napi_lock); > + hash_for_each_possible(ctx->napi_ht, he, node, napi_id) { > + if (he->napi_id == napi_id) { > + he->timeout = jiffies + NAPI_TIMEOUT; > + goto out; > + } > + } > + > + he = kmalloc(sizeof(*he), GFP_NOWAIT); > + if (!he) > + goto out; > + > + he->napi_id = napi_id; > + he->timeout = jiffies + NAPI_TIMEOUT; > + hash_add(ctx->napi_ht, &he->node, napi_id); > + > + list_add_tail(&he->list, &ctx->napi_list); > + > +out: > + spin_unlock(&ctx->napi_lock); > +} What about allocating 'he' with GFP_KERNEL before taking the spin lock, and then kfree()-ing it after the unlock in the case where the lookup finds an existing entry (the he->napi_id == napi_id path)? That would make the critical section shorter. Also, GFP_NOWAIT is likely to fail under memory pressure. -- Ammar Faizi