This is done by creating a new RCU data structure (io_ev_fd) as part of
io_ring_ctx that holds the eventfd_ctx.

The function io_eventfd_signal runs under rcu_read_lock with a single
rcu_dereference of io_ev_fd, so that if another thread unregisters the
eventfd while io_eventfd_signal is still executing, the eventfd_ctx it
obtained stays valid and the eventfd_signal call completes safely.

Registering and unregistering the eventfd is done under ev_fd_lock, so
concurrent registrations and unregistrations cannot race with each
other.

With the above approach, ring quiesce, which is much more expensive
than an RCU read lock, is avoided entirely. On the system tested,
io_uring_register with IORING_REGISTER_EVENTFD takes less than 1ms
using the RCU lock, compared to 15ms before with ring quiesce.

Signed-off-by: Usama Arif <usama.arif@xxxxxxxxxxxxx>
---
 fs/io_uring.c | 91 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 66 insertions(+), 25 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 21531609a9c6..47d48020ae27 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -326,6 +326,10 @@ struct io_submit_state {
 	struct blk_plug plug;
 };
 
+struct io_ev_fd {
+	struct eventfd_ctx	*cq_ev_fd;
+};
+
 struct io_ring_ctx {
 	/* const or read-mostly hot data */
 	struct {
@@ -399,7 +403,8 @@ struct io_ring_ctx {
 	struct {
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
-		struct eventfd_ctx	*cq_ev_fd;
+		struct io_ev_fd	__rcu	*io_ev_fd;
+		struct mutex		ev_fd_lock;
 		struct wait_queue_head	cq_wait;
 		unsigned		cq_extra;
 		atomic_t		cq_timeouts;
@@ -1448,6 +1453,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
+	mutex_init(&ctx->ev_fd_lock);
 	init_waitqueue_head(&ctx->cq_wait);
 	spin_lock_init(&ctx->completion_lock);
 	spin_lock_init(&ctx->timeout_lock);
@@ -1726,13 +1732,24 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 	return &rings->cqes[tail & mask];
 }
 
-static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
-	if (likely(!ctx->cq_ev_fd))
-		return false;
+	struct io_ev_fd *ev_fd;
+
+	rcu_read_lock();
+	/* rcu_dereference ctx->io_ev_fd once for both the check and eventfd_signal */
+	ev_fd = rcu_dereference(ctx->io_ev_fd);
+
+	if (likely(!ev_fd))
+		goto out;
 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
-		return false;
-	return !ctx->eventfd_async || io_wq_current_is_worker();
+		goto out;
+
+	if (!ctx->eventfd_async || io_wq_current_is_worker())
+		eventfd_signal(ev_fd->cq_ev_fd, 1);
+
+out:
+	rcu_read_unlock();
 }
 
 /*
@@ -1751,8 +1768,7 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+	io_eventfd_signal(ctx);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1764,8 +1780,7 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (waitqueue_active(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+	io_eventfd_signal(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -9353,35 +9368,59 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 
 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
 {
+	struct io_ev_fd *ev_fd;
 	__s32 __user *fds = arg;
-	int fd;
+	int fd, ret;
 
-	if (ctx->cq_ev_fd)
-		return -EBUSY;
+	mutex_lock(&ctx->ev_fd_lock);
+	ret = -EBUSY;
+	if (rcu_dereference_protected(ctx->io_ev_fd, lockdep_is_held(&ctx->ev_fd_lock)))
+		goto out;
 
+	ret = -EFAULT;
 	if (copy_from_user(&fd, fds, sizeof(*fds)))
-		return -EFAULT;
+		goto out;
 
-	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
-	if (IS_ERR(ctx->cq_ev_fd)) {
-		int ret = PTR_ERR(ctx->cq_ev_fd);
+	ret = -ENOMEM;
+	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
+	if (!ev_fd)
+		goto out;
 
-		ctx->cq_ev_fd = NULL;
-		return ret;
+	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
+	if (IS_ERR(ev_fd->cq_ev_fd)) {
+		ret = PTR_ERR(ev_fd->cq_ev_fd);
+		kfree(ev_fd);
+		goto out;
 	}
 
-	return 0;
+	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+	ret = 0;
+
+out:
+	mutex_unlock(&ctx->ev_fd_lock);
+	return ret;
 }
 
 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 {
-	if (ctx->cq_ev_fd) {
-		eventfd_ctx_put(ctx->cq_ev_fd);
-		ctx->cq_ev_fd = NULL;
-		return 0;
+	struct io_ev_fd *ev_fd;
+	int ret;
+
+	mutex_lock(&ctx->ev_fd_lock);
+	ev_fd = rcu_dereference_protected(ctx->io_ev_fd, lockdep_is_held(&ctx->ev_fd_lock));
+	if (!ev_fd) {
+		ret = -ENXIO;
+		goto out;
 	}
+	rcu_assign_pointer(ctx->io_ev_fd, NULL);
+	synchronize_rcu();
+	eventfd_ctx_put(ev_fd->cq_ev_fd);
+	kfree(ev_fd);
+	ret = 0;
 
-	return -ENXIO;
+out:
+	mutex_unlock(&ctx->ev_fd_lock);
+	return ret;
 }
 
 static void io_destroy_buffers(struct io_ring_ctx *ctx)
@@ -10960,6 +10999,8 @@ static bool io_register_op_must_quiesce(int op)
 	case IORING_REGISTER_FILES:
 	case IORING_UNREGISTER_FILES:
 	case IORING_REGISTER_FILES_UPDATE:
+	case IORING_REGISTER_EVENTFD:
+	case IORING_UNREGISTER_EVENTFD:
 	case IORING_REGISTER_PROBE:
 	case IORING_REGISTER_PERSONALITY:
 	case IORING_UNREGISTER_PERSONALITY:
-- 
2.25.1
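
Usage note, not part of the patch: the register/unregister path above
can be exercised from userspace via liburing's
io_uring_register_eventfd() and io_uring_unregister_eventfd() helpers.
Below is a minimal, illustrative timing sketch; the file name, loop
count, and timing code are my own, and it assumes liburing is
installed and the kernel carries this patch:

/*
 * evfd_bench.c (hypothetical name): time IORING_REGISTER_EVENTFD /
 * IORING_UNREGISTER_EVENTFD through liburing.
 * Build with: gcc -O2 -o evfd_bench evfd_bench.c -luring
 */
#include <stdio.h>
#include <sys/eventfd.h>
#include <time.h>
#include <liburing.h>

static long long elapsed_us(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000000LL +
	       (b->tv_nsec - a->tv_nsec) / 1000;
}

int main(void)
{
	struct io_uring ring;
	struct timespec t0, t1;
	int efd, ret, i;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	for (i = 0; i < 10; i++) {
		clock_gettime(CLOCK_MONOTONIC, &t0);
		ret = io_uring_register_eventfd(&ring, efd);
		clock_gettime(CLOCK_MONOTONIC, &t1);
		if (ret < 0) {
			fprintf(stderr, "register_eventfd: %d\n", ret);
			break;
		}
		printf("register took %lld us\n", elapsed_us(&t0, &t1));

		/*
		 * Without this patch each pair of calls pays for a full
		 * ring quiesce; with it, register takes a mutex plus an
		 * RCU pointer publish, and unregister additionally waits
		 * in synchronize_rcu().
		 */
		io_uring_unregister_eventfd(&ring);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

On an unpatched kernel each iteration should show the quiesce cost
(~15ms on the system tested above); with the patch applied it should
drop below 1ms.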