Various callsites currently check current->task_works != NULL to know
when to run work. Add a helper that we use internally for that check.
This is in preparation for also not running the work if the exit work
has already been queued.

Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 include/linux/task_work.h | 16 ++++++++++++++--
 kernel/task_work.c        |  2 +-
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index bd9a6a91c097..088538590e65 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -15,11 +15,23 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
 
 int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
-void task_work_run(void);
+void __task_work_run(void);
+
+static inline bool task_work_pending(void)
+{
+	return current->task_works;
+}
+
+static inline void task_work_run(void)
+{
+	if (task_work_pending())
+		__task_work_run();
+}
 
 static inline void exit_task_work(struct task_struct *task)
 {
-	task_work_run();
+	/* must always run to install exit work */
+	__task_work_run();
 }
 
 #endif	/* _LINUX_TASK_WORK_H */
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 825f28259a19..9620333423a3 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -87,7 +87,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
  * it exits. In the latter case task_work_add() can no longer add the
  * new work after task_work_run() returns.
  */
-void task_work_run(void)
+void __task_work_run(void)
 {
 	struct task_struct *task = current;
 	struct callback_head *work, *head, *next;

-- 
2.26.0
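
For illustration, a rough sketch of what a converted callsite could look
like after this change; the function example_notify_resume() below is
hypothetical and not part of this patch:

	/*
	 * Hypothetical callsite, sketching the intended usage. Before
	 * this patch, callers open-coded the pending check:
	 *
	 *	if (current->task_works)
	 *		task_work_run();
	 *
	 * With the new inline task_work_run() wrapper, a bare call
	 * suffices: the wrapper only enters __task_work_run() when
	 * task_work_pending() reports queued work.
	 */
	#include <linux/task_work.h>

	static void example_notify_resume(void)
	{
		task_work_run();	/* cheap no-op when nothing is pending */
	}

Note that exit_task_work() deliberately bypasses the pending check and
calls __task_work_run() directly, since the exit path must always run
in order to install the exit work that prevents further task_work_add()
calls from succeeding.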