[PATCH 6/9] sched: add a sched_work list

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This is similar to the task_works, and uses the same infrastructure, but
the sched_work list is run when the task is being scheduled in or out.

The intended use case here is for core code to be able to add work
that should be automatically run by the task, without the task needing
to do anything. The work is queued from outside of the task; one
example would be from waitqueue handlers, or anything else that is
invoked out-of-band from the task itself.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 include/linux/sched.h     |  4 ++-
 include/linux/task_work.h |  5 ++++
 kernel/sched/core.c       | 16 ++++++++--
 kernel/task_work.c        | 62 ++++++++++++++++++++++++++++++++++++---
 4 files changed, 80 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..da15112c1140 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -648,6 +648,7 @@ struct task_struct {
 	/* Per task flags (PF_*), defined further below: */
 	unsigned int			flags;
 	unsigned int			ptrace;
+	int				on_rq;
 
 #ifdef CONFIG_SMP
 	struct llist_node		wake_entry;
@@ -670,13 +671,14 @@ struct task_struct {
 	int				recent_used_cpu;
 	int				wake_cpu;
 #endif
-	int				on_rq;
 
 	int				prio;
 	int				static_prio;
 	int				normal_prio;
 	unsigned int			rt_priority;
 
+	struct callback_head		*sched_work;
+
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
 	struct sched_rt_entity		rt;
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index bd9a6a91c097..e0c56f461df6 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -17,9 +17,14 @@ int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);
 
+int sched_work_add(struct task_struct *task, struct callback_head *work);
+struct callback_head *sched_work_cancel(struct task_struct *, task_work_func_t);
+void sched_work_run(void);
+
 static inline void exit_task_work(struct task_struct *task)
 {
 	task_work_run();
+	sched_work_run();
 }
 
 #endif	/* _LINUX_TASK_WORK_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c7bab13f9caa..9e0f754e0630 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2678,6 +2678,7 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
 	p->on_rq			= 0;
+	p->sched_work			= NULL;
 
 	p->se.on_rq			= 0;
 	p->se.exec_start		= 0;
@@ -4102,8 +4103,13 @@ void __noreturn do_task_dead(void)
 		cpu_relax();
 }
 
-static void sched_out_update(struct task_struct *tsk)
+static bool sched_out_update(struct task_struct *tsk)
 {
+	if (unlikely(tsk->sched_work)) {
+		sched_work_run();
+		return true;
+	}
+
 	/*
 	 * If a worker went to sleep, notify and ask workqueue whether
 	 * it wants to wake up a task to maintain concurrency.
@@ -4119,6 +4125,8 @@ static void sched_out_update(struct task_struct *tsk)
 			io_wq_worker_sleeping(tsk);
 		preempt_enable_no_resched();
 	}
+
+	return false;
 }
 
 static void sched_in_update(struct task_struct *tsk)
@@ -4129,6 +4137,8 @@ static void sched_in_update(struct task_struct *tsk)
 		else
 			io_wq_worker_running(tsk);
 	}
+	if (unlikely(tsk->sched_work))
+		sched_work_run();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -4136,7 +4146,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	if (!tsk->state)
 		return;
 
-	sched_out_update(tsk);
+	/* Running sched_work may have made the task runnable again; check. */
+	if (sched_out_update(tsk) && !tsk->state)
+		return;
 
 	if (tsk_is_pi_blocked(tsk))
 		return;
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 3445421266e7..ba62485d5b3d 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,7 +3,14 @@
 #include <linux/task_work.h>
 #include <linux/tracehook.h>
 
-static struct callback_head work_exited; /* all we need is ->next == NULL */
+static void task_exit_func(struct callback_head *head)
+{
+}
+
+static struct callback_head work_exited = {
+	.next	= NULL,
+	.func	= task_exit_func,
+};
 
 static int __task_work_add(struct task_struct *task,
 			   struct callback_head **headptr,
@@ -53,6 +60,28 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 	return ret;
 }
 
+/**
+ * sched_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ * Note: unlike task_work_add(), there is no @notify argument here.
+ *
+ * Queue @work for sched_work_run() below.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task is either scheduled
+ * in or out.
+ *
+ * Note: there is no ordering guarantee on works queued here.
+ *
+ * RETURNS:
+ * 0 on success, or -ESRCH if the @task is exiting/exited.
+ */
+int
+sched_work_add(struct task_struct *task, struct callback_head *work)
+{
+	return __task_work_add(task, &task->sched_work, work);
+}
+
 static struct callback_head *__task_work_cancel(struct task_struct *task,
 						struct callback_head **headptr,
 						task_work_func_t func)
@@ -98,10 +127,27 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	return __task_work_cancel(task, &task->task_works, func);
 }
 
-static void __task_work_run(struct task_struct *task,
-			    struct callback_head **headptr)
+/**
+ * sched_work_cancel - cancel a pending work added by sched_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+sched_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+	return __task_work_cancel(task, &task->sched_work, func);
+}
+
+static void __task_work_run(struct callback_head **headptr)
 {
 	struct callback_head *work, *head, *next;
+	struct task_struct *task = current;
 
 	for (;;) {
 		/*
@@ -148,5 +194,13 @@ static void __task_work_run(struct task_struct *task,
  */
 void task_work_run(void)
 {
-	__task_work_run(current, &current->task_works);
+	__task_work_run(&current->task_works);
+}
+
+/**
+ * sched_work_run - execute the works added by sched_work_add()
+ */
+void sched_work_run()
+{
+	__task_work_run(&current->sched_work);
 }
-- 
2.25.1




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux