Rather than hardwire the "does this task run task_work?" check to
PF_KTHREAD, add a task flag that tells us whether the task in question
runs task_work or not. At fork time, this flag is set for kernel
threads.

This is in preparation for allowing kernel threads to signal that they
will run deferred task_work. No functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/file_table.c       | 2 +-
 include/linux/sched.h | 2 +-
 kernel/fork.c         | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/file_table.c b/fs/file_table.c
index 5c00dc38558d..d824f1330d6e 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -520,7 +520,7 @@ void fput(struct file *file)
 		file_free(file);
 		return;
 	}
-	if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+	if (likely(!in_interrupt() && !(task->flags & PF_NO_TASKWORK))) {
 		init_task_work(&file->f_task_work, ____fput);
 		if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
 			return;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c15365a30c0..301f5dda6a06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1709,7 +1709,7 @@ extern struct pid *cad_pid;
 						 * I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
-#define PF__HOLE__00800000	0x00800000
+#define PF_NO_TASKWORK		0x00800000	/* task doesn't run task_work */
 #define PF__HOLE__01000000	0x01000000
 #define PF__HOLE__02000000	0x02000000
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
diff --git a/kernel/fork.c b/kernel/fork.c
index 735405a9c5f3..3745407624c7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2235,7 +2235,7 @@ __latent_entropy struct task_struct *copy_process(
 		goto fork_out;
 	p->flags &= ~PF_KTHREAD;
 	if (args->kthread)
-		p->flags |= PF_KTHREAD;
+		p->flags |= PF_KTHREAD | PF_NO_TASKWORK;
 	if (args->user_worker) {
 		/*
 		 * Mark us a user worker, and block any signal that isn't
-- 
2.49.0
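
For illustration only, not part of the patch above: once kernel threads
can opt in, a kthread that promises to run deferred task_work could
clear PF_NO_TASKWORK and flush its queue from its own loop, roughly as
sketched below. The direct flag clear and the function name are
placeholder assumptions; whatever opt-in interface the follow-up
patches introduce would take its place.

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/task_work.h>

static int taskwork_kthread_fn(void *data)
{
	/* hypothetical opt-in: this kthread will run deferred task_work itself */
	current->flags &= ~PF_NO_TASKWORK;

	while (!kthread_should_stop()) {
		/* ... the thread's normal work ... */

		/* run any task_work queued to us, e.g. a deferred final fput */
		task_work_run();

		msleep(100);
	}
	return 0;
}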