If the callsite cares, use task_work_pending(). If not, just call
task_work_run() unconditionally; that makes the check inline and
doesn't add any extra overhead.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io-wq.c                |  7 ++-----
 fs/io_uring.c             | 20 ++++++++------------
 include/linux/tracehook.h |  3 +--
 kernel/signal.c           |  7 +++----
 4 files changed, 14 insertions(+), 23 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 4023c9846860..5bee3f5f67e1 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -717,8 +717,7 @@ static int io_wq_manager(void *data)
 	complete(&wq->done);
 
 	while (!kthread_should_stop()) {
-		if (current->task_works)
-			task_work_run();
+		task_work_run();
 
 		for_each_node(node) {
 			struct io_wqe *wqe = wq->wqes[node];
@@ -742,9 +741,7 @@ static int io_wq_manager(void *data)
 		schedule_timeout(HZ);
 	}
 
-	if (current->task_works)
-		task_work_run();
-
+	task_work_run();
 	return 0;
 err:
 	set_bit(IO_WQ_BIT_ERROR, &wq->state);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 79bd22289d73..1579390c7c53 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5967,8 +5967,7 @@ static int io_sq_thread(void *data)
 		if (!list_empty(&ctx->poll_list) ||
 		    (!time_after(jiffies, timeout) && ret != -EBUSY &&
 		    !percpu_ref_is_dying(&ctx->refs))) {
-			if (current->task_works)
-				task_work_run();
+			task_work_run();
 			cond_resched();
 			continue;
 		}
@@ -6000,8 +5999,8 @@ static int io_sq_thread(void *data)
 				finish_wait(&ctx->sqo_wait, &wait);
 				break;
 			}
-			if (current->task_works) {
-				task_work_run();
+			if (task_work_pending()) {
+				__task_work_run();
 				finish_wait(&ctx->sqo_wait, &wait);
 				continue;
 			}
@@ -6024,8 +6023,7 @@ static int io_sq_thread(void *data)
 		timeout = jiffies + ctx->sq_thread_idle;
 	}
 
-	if (current->task_works)
-		task_work_run();
+	task_work_run();
 
 	set_fs(old_fs);
 	if (cur_mm) {
@@ -6094,9 +6092,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	do {
 		if (io_cqring_events(ctx, false) >= min_events)
 			return 0;
-		if (!current->task_works)
+		if (!task_work_pending())
 			break;
-		task_work_run();
+		__task_work_run();
 	} while (1);
 
 	if (sig) {
@@ -6117,8 +6115,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	do {
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
-		if (current->task_works)
-			task_work_run();
+		task_work_run();
 		if (io_should_wake(&iowq, false))
 			break;
 		schedule();
@@ -7467,8 +7464,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	int submitted = 0;
 	struct fd f;
 
-	if (current->task_works)
-		task_work_run();
+	task_work_run();
 
 	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
 		return -EINVAL;
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 36fb3bbed6b2..608a2d12bc14 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -184,8 +184,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 	 * hlist_add_head(task->task_works);
 	 */
 	smp_mb__after_atomic();
-	if (unlikely(current->task_works))
-		task_work_run();
+	task_work_run();
 
 #ifdef CONFIG_KEYS_REQUEST_CACHE
 	if (unlikely(current->cached_requested_key)) {
diff --git a/kernel/signal.c b/kernel/signal.c
index e58a6c619824..d62b7a3f2045 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2271,8 +2271,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
 void ptrace_notify(int exit_code)
 {
 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-	if (unlikely(current->task_works))
-		task_work_run();
+
+	task_work_run();
 
 	spin_lock_irq(&current->sighand->siglock);
 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
@@ -2529,8 +2529,7 @@ bool get_signal(struct ksignal *ksig)
 	struct signal_struct *signal = current->signal;
 	int signr;
 
-	if (unlikely(current->task_works))
-		task_work_run();
+	task_work_run();
 
 	if (unlikely(uprobe_deny_signal()))
 		return false;
-- 
2.26.0
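For reference, task_work_pending() and __task_work_run() are not
introduced by this patch; they are assumed to come from an earlier
patch in the series. Below is a minimal sketch of how those helpers
could look, assuming the pending check is a plain test of
current->task_works and task_work_run() becomes an inline wrapper as
the commit message describes (names and bodies are illustrative, not
the actual include/linux/task_work.h change):

#include <linux/sched.h>	/* current, struct task_struct */

/* Illustrative only: assumed shape of the helpers used above. */
static inline bool task_work_pending(void)
{
	/* A non-NULL list head means callbacks are queued for this task. */
	return READ_ONCE(current->task_works) != NULL;
}

/* Out-of-line worker that actually runs the queued callbacks. */
void __task_work_run(void);

static inline void task_work_run(void)
{
	/*
	 * The pending check is inline, so unconditional callers pay only
	 * a load and a branch when no work is queued.
	 */
	if (task_work_pending())
		__task_work_run();
}

With that layout, callsites that only want to flush work call
task_work_run() unconditionally, while callsites such as
io_cqring_wait() that need to branch on whether work exists test
task_work_pending() and call __task_work_run() themselves.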