Some task work users implement their own way to know whether a callback is
already queued on the current task, fiddling with the callback head
internals to do so. Provide a consolidated API for this purpose instead.

Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
---
 include/linux/task_work.h | 12 ++++++++++++
 kernel/task_work.c        | 11 +++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 0646804860ff..31caf12c1313 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -5,12 +5,15 @@
 #include <linux/list.h>
 #include <linux/sched.h>
 
+#define TASK_WORK_DEQUEUED	((void *) -1UL)
+
 typedef void (*task_work_func_t)(struct callback_head *);
 
 static inline void
 init_task_work(struct callback_head *twork, task_work_func_t func)
 {
 	twork->func = func;
+	twork->next = TASK_WORK_DEQUEUED;
 }
 
 enum task_work_notify_mode {
@@ -26,6 +29,15 @@ static inline bool task_work_pending(struct task_struct *task)
 	return READ_ONCE(task->task_works);
 }
 
+/*
+ * Check if a work is queued. Beware: this is inherently racy if the work can
+ * be queued elsewhere than the current task.
+ */
+static inline bool task_work_queued(struct callback_head *twork)
+{
+	return twork->next != TASK_WORK_DEQUEUED;
+}
+
 int task_work_add(struct task_struct *task, struct callback_head *twork,
 		  enum task_work_notify_mode mode);
 
diff --git a/kernel/task_work.c b/kernel/task_work.c
index d1efec571a4a..0d7b04095753 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -56,6 +56,8 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 {
 	struct callback_head *head;
 
+	work->next = TASK_WORK_DEQUEUED;
+
 	if (notify == TWA_NMI_CURRENT) {
 		if (WARN_ON_ONCE(task != current))
 			return -EINVAL;
@@ -67,8 +69,10 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 
 	head = READ_ONCE(task->task_works);
 	do {
-		if (unlikely(head == &work_exited))
+		if (unlikely(head == &work_exited)) {
+			work->next = TASK_WORK_DEQUEUED;
 			return -ESRCH;
+		}
 		work->next = head;
 	} while (!try_cmpxchg(&task->task_works, &head, work));
 
@@ -129,8 +133,10 @@ task_work_cancel_match(struct task_struct *task,
 		if (!match(work, data)) {
 			pprev = &work->next;
 			work = READ_ONCE(*pprev);
-		} else if (try_cmpxchg(pprev, &work, work->next))
+		} else if (try_cmpxchg(pprev, &work, work->next)) {
+			work->next = TASK_WORK_DEQUEUED;
 			break;
+		}
 	}
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
@@ -224,6 +230,7 @@ void task_work_run(void)
 
 		do {
 			next = work->next;
+			work->next = TASK_WORK_DEQUEUED;
 			work->func(work);
 			work = next;
 			cond_resched();
-- 
2.46.0
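
Not part of the patch, just an illustration of the new helper: the sketch
below assumes a caller-owned context (struct my_ctx, my_ctx_work() and the
other names are hypothetical) and shows how such a user could rely on
task_work_queued() instead of open-coding a check on twork->next:

	#include <linux/sched.h>
	#include <linux/task_work.h>

	struct my_ctx {
		struct callback_head cb;	/* queued on the owning task only */
	};

	static void my_ctx_work(struct callback_head *cb)
	{
		/*
		 * Deferred processing, run from task_work_run(). The work has
		 * already been marked dequeued by then, so task_work_queued(cb)
		 * is false again at this point.
		 */
	}

	static void my_ctx_init(struct my_ctx *ctx)
	{
		/* With this patch, also marks ctx->cb as dequeued. */
		init_task_work(&ctx->cb, my_ctx_work);
	}

	/* Queue the callback on the current task unless it is already pending. */
	static void my_ctx_poke(struct my_ctx *ctx)
	{
		/* Only reliable because this work is queued on current only. */
		if (task_work_queued(&ctx->cb))
			return;

		if (task_work_add(current, &ctx->cb, TWA_RESUME) == -ESRCH) {
			/* The task is exiting; the work was not queued. */
		}
	}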