[RFC PATCH v3 03/10] task_work, sched: Add a _locked variant to task_work_cancel()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Later commits will need to issue a task_work_cancel() from within the
scheduler with the task's ->pi_lock held.

Add a _locked variant that expects p->pi_lock to be held. Expose it in a
separate scheduler header file, as this really is a scheduler-only
interface.

Signed-off-by: Valentin Schneider <vschneid@xxxxxxxxxx>
---
 kernel/sched/task_work_sched.h | 14 +++++++
 kernel/task_work.c             | 67 ++++++++++++++++++++++++++--------
 2 files changed, 66 insertions(+), 15 deletions(-)
 create mode 100644 kernel/sched/task_work_sched.h

diff --git a/kernel/sched/task_work_sched.h b/kernel/sched/task_work_sched.h
new file mode 100644
index 0000000000000..e235da456427f
--- /dev/null
+++ b/kernel/sched/task_work_sched.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Scheduler internal task_work methods
+ */
+#ifndef _KERNEL_TASK_WORK_SCHED_H
+#define _KERNEL_TASK_WORK_SCHED_H
+
+#include <linux/task_work.h>
+#include <linux/sched.h>
+
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func);
+
+#endif
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 95a7e1b7f1dab..81092bc2e7371 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,6 +3,8 @@
 #include <linux/task_work.h>
 #include <linux/resume_user_mode.h>
 
+#include "sched/task_work_sched.h"
+
 static struct callback_head work_exited; /* all we need is ->next == NULL */
 
 /**
@@ -74,33 +76,20 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 	return 0;
 }
 
-/**
- * task_work_cancel_match - cancel a pending work added by task_work_add()
- * @task: the task which should execute the work
- * @match: match function to call
- * @data: data to be passed in to match function
- *
- * RETURNS:
- * The found work or NULL if not found.
- */
-struct callback_head *
-task_work_cancel_match(struct task_struct *task,
+static struct callback_head *
+task_work_cancel_match_locked(struct task_struct *task,
 		       bool (*match)(struct callback_head *, void *data),
 		       void *data)
 {
 	struct callback_head **pprev = &task->task_works;
 	struct callback_head *work;
-	unsigned long flags;
 
-	if (likely(!task_work_pending(task)))
-		return NULL;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.
 	 * Either we raced with task_work_add() which added the
 	 * new entry before this work, we will find it again. Or
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	work = READ_ONCE(*pprev);
 	while (work) {
 		if (!match(work, data)) {
@@ -109,6 +98,32 @@ task_work_cancel_match(struct task_struct *task,
 		} else if (try_cmpxchg(pprev, &work, work->next))
 			break;
 	}
+
+	return work;
+}
+
+/**
+ * task_work_cancel_match - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @match: match function to call
+ * @data: data to be passed in to match function
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_match(struct task_struct *task,
+		       bool (*match)(struct callback_head *, void *data),
+		       void *data)
+{
+	unsigned long flags;
+	struct callback_head *work;
+
+	if (likely(!task_work_pending(task)))
+		return NULL;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	work = task_work_cancel_match_locked(task, match, data);
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	return work;
@@ -136,6 +151,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	return task_work_cancel_match(task, task_work_func_match, func);
 }
 
+/**
+ * task_work_cancel_locked - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue. Caller must hold @task->pi_lock.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
+{
+	lockdep_assert_held(&task->pi_lock);
+
+	if (likely(!task_work_pending(task)))
+		return NULL;
+
+	return task_work_cancel_match_locked(task, task_work_func_match, func);
+}
+
 /**
  * task_work_run - execute the works added by task_work_add()
  *
-- 
2.43.0





[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux