sched/fair: Use task_work_queued() in task_tick_numa() instead of the open-coded "work->next != work" check, and drop the manual callback-head re-initialization in task_numa_work() and init_numa_balancing() that existed only to support that ad-hoc convention. Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx> --- kernel/sched/fair.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ce2e94ccad0c..6a2f45821dc0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3303,7 +3303,6 @@ static void task_numa_work(struct callback_head *work) SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); - work->next = work; /* * Who cares about NUMA placement when they're dying. * @@ -3551,8 +3550,6 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; p->numa_scan_period = sysctl_numa_balancing_scan_delay; p->numa_migrate_retry = 0; - /* Protect against double add, see task_tick_numa and task_numa_work */ - p->numa_work.next = &p->numa_work; p->numa_faults = NULL; p->numa_pages_migrated = 0; p->total_numa_faults = 0; @@ -3593,7 +3590,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr) /* * We don't care about NUMA placement if we don't have memory. */ - if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) + if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || task_work_queued(work)) return; /* -- 2.46.0