Hello Jens,

Excuse me, this change needs your review. I could not think of a better
way to avoid the problem described below, so I would appreciate your
opinion. A short usage sketch illustrating the per-ctx check is appended
after the patch.

Thanks,
Qiang

________________________________________
From: Zhang, Qiang <qiang.zhang@xxxxxxxxxxxxx>
Sent: Wednesday, 9 June 2021 16:15
To: axboe@xxxxxxxxx
Cc: viro@xxxxxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx; linux-fsdevel@xxxxxxxxxxxxxxx
Subject: [PATCH v3] eventfd: convert global percpu eventfd_wake_count to ctx percpu eventfd_wake_count

From: Zqiang <qiang.zhang@xxxxxxxxxxxxx>

On an RT system, spinlock_irq is replaced by an rt_mutex, so a task
calling eventfd_signal() can be preempted after incrementing the
current CPU's eventfd_wake_count. If another task then runs on the same
CPU and calls eventfd_signal(), it finds the CPU's eventfd_wake_count
non-zero, triggers the warning below, and returns early, missing a
wakeup:

RIP: 0010:eventfd_signal+0x85/0xa0
 vhost_add_used_and_signal_n+0x41/0x50 [vhost]
 handle_rx+0xb9/0x9e0 [vhost_net]
 handle_rx_net+0x15/0x20 [vhost_net]
 vhost_worker+0x95/0xe0 [vhost]
 kthread+0x19c/0x1c0
 ret_from_fork+0x22/0x30

On a non-RT system, even if eventfd_signal() calls nest, no deadlock
can occur as long as the nested calls are on different eventfd_ctx
objects. Tracking the recursion count per eventfd_ctx therefore keeps
the deadlock protection while avoiding the spurious warning above.

Fixes: b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
Reported-by: kernel test robot <lkp@xxxxxxxxx>
Signed-off-by: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
---
 v1->v2: Modify submission information.
 v2->v3: Fix compilation error on riscv32.

 fs/aio.c                |  2 +-
 fs/eventfd.c            | 30 ++++++++++--------------------
 include/linux/eventfd.h | 26 +++++++++++++++++++++-----
 3 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 76ce0cc3ee4e..b45983d5d35a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1695,7 +1695,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		list_del(&iocb->ki_list);
 		iocb->ki_res.res = mangle_poll(mask);
 		req->done = true;
-		if (iocb->ki_eventfd && eventfd_signal_count()) {
+		if (iocb->ki_eventfd && eventfd_signal_count(iocb->ki_eventfd)) {
 			iocb = NULL;
 			INIT_WORK(&req->work, aio_poll_put_work);
 			schedule_work(&req->work);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e265b6dd4f34..b1df2c5720a7 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -25,26 +25,9 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
-DEFINE_PER_CPU(int, eventfd_wake_count);
 
 static DEFINE_IDA(eventfd_ida);
 
-struct eventfd_ctx {
-	struct kref kref;
-	wait_queue_head_t wqh;
-	/*
-	 * Every time that a write(2) is performed on an eventfd, the
-	 * value of the __u64 being written is added to "count" and a
-	 * wakeup is performed on "wqh". A read(2) will return the "count"
-	 * value to userspace, and will reset "count" to zero. The kernel
-	 * side eventfd_signal() also, adds to the "count" counter and
-	 * issue a wakeup.
-	 */
-	__u64 count;
-	unsigned int flags;
-	int id;
-};
-
 /**
  * eventfd_signal - Adds @n to the eventfd counter.
  * @ctx: [in] Pointer to the eventfd context.
@@ -71,17 +54,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns true, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+	if (WARN_ON_ONCE(this_cpu_read(*ctx->eventfd_wake_count)))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	this_cpu_inc(eventfd_wake_count);
+	this_cpu_inc(*ctx->eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-	this_cpu_dec(eventfd_wake_count);
+	this_cpu_dec(*ctx->eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
@@ -92,6 +75,9 @@ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
 {
 	if (ctx->id >= 0)
 		ida_simple_remove(&eventfd_ida, ctx->id);
+
+	if (ctx->eventfd_wake_count)
+		free_percpu(ctx->eventfd_wake_count);
 	kfree(ctx);
 }
@@ -421,6 +407,10 @@ static int do_eventfd(unsigned int count, int flags)
 	if (!ctx)
 		return -ENOMEM;
 
+	ctx->eventfd_wake_count = alloc_percpu(int);
+	if (!ctx->eventfd_wake_count)
+		goto err;
+
 	kref_init(&ctx->kref);
 	init_waitqueue_head(&ctx->wqh);
 	ctx->count = count;
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index fa0a524baed0..6311b931ac6f 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <linux/kref.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -29,11 +30,27 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
-struct eventfd_ctx;
 struct file;
 
 #ifdef CONFIG_EVENTFD
 
+struct eventfd_ctx {
+	struct kref kref;
+	wait_queue_head_t wqh;
+	/*
+	 * Every time that a write(2) is performed on an eventfd, the
+	 * value of the __u64 being written is added to "count" and a
+	 * wakeup is performed on "wqh". A read(2) will return the "count"
+	 * value to userspace, and will reset "count" to zero. The kernel
+	 * side eventfd_signal() also, adds to the "count" counter and
+	 * issue a wakeup.
+	 */
+	__u64 count;
+	unsigned int flags;
+	int id;
+	int __percpu *eventfd_wake_count;
+};
+
 void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
@@ -43,11 +60,10 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_count(struct eventfd_ctx *ctx)
 {
-	return this_cpu_read(eventfd_wake_count);
+	return this_cpu_read(*ctx->eventfd_wake_count);
 }
 
 #else /* CONFIG_EVENTFD */
@@ -78,7 +94,7 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 	return -ENOSYS;
 }
 
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_count(struct eventfd_ctx *ctx)
 {
 	return false;
 }
-- 
2.17.1
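
P.S. For illustration only, not part of the patch: a minimal sketch of
how a caller might use the per-ctx eventfd_signal_count() to defer a
nested signal to a safe context, mirroring the aio.c hunk above. The
my_dev/my_notify/my_signal_work names are hypothetical; only
eventfd_signal(), eventfd_signal_count() and the workqueue API are real.

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_dev {
	struct eventfd_ctx *efd;
	struct work_struct signal_work;	/* INIT_WORK(..., my_signal_work) at setup */
};

/* Runs in process context, where the per-ctx counter is zero again. */
static void my_signal_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, signal_work);

	eventfd_signal(dev->efd, 1);
}

static void my_notify(struct my_dev *dev)
{
	/*
	 * Only a signal nested on *this* ctx is deferred; with the old
	 * global counter, any in-flight signal on the same CPU would
	 * have been (wrongly) treated as recursion and the wakeup lost.
	 */
	if (eventfd_signal_count(dev->efd))
		schedule_work(&dev->signal_work);
	else
		eventfd_signal(dev->efd, 1);
}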