Rather than (ab)use RCU for this kind of thing, add a work queue item and use that for the unlikely case of a notification being done inside another notification. This cleans up the code, better adheres to the expected usage of RCU, and means that the odd ->ops-based serialization can be removed, as queue_work() will handle that for us. Signed-off-by: Jens Axboe <axboe@xxxxxxxxx> --- diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 100d5da94cb9..8ecf3c106f89 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -17,12 +17,8 @@ struct io_ev_fd { /* protected by ->completion_lock */ unsigned last_cq_tail; refcount_t refs; - atomic_t ops; struct rcu_head rcu; -}; - -enum { - IO_EVENTFD_OP_SIGNAL_BIT, + struct work_struct work; }; static void io_eventfd_free(struct rcu_head *rcu) @@ -39,9 +35,9 @@ static void io_eventfd_put(struct io_ev_fd *ev_fd) call_rcu(&ev_fd->rcu, io_eventfd_free); } -static void io_eventfd_do_signal(struct rcu_head *rcu) +static void io_eventfd_do_signal(struct work_struct *work) { - struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu); + struct io_ev_fd *ev_fd = container_of(work, struct io_ev_fd, work); eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE); io_eventfd_put(ev_fd); @@ -63,11 +59,7 @@ static bool __io_eventfd_signal(struct io_ev_fd *ev_fd) eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE); return true; } - if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) { - call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal); - return false; - } - return true; + return !queue_work(system_unbound_wq, &ev_fd->work); } /* @@ -184,7 +176,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, ev_fd->eventfd_async = eventfd_async; ctx->has_evfd = true; refcount_set(&ev_fd->refs, 1); - atomic_set(&ev_fd->ops, 0); + INIT_WORK(&ev_fd->work, io_eventfd_do_signal); rcu_assign_pointer(ctx->io_ev_fd, ev_fd); return 0; } -- Jens Axboe