[PATCH] sched: Move task_mm_cid_work to mm delayed work

Currently, the task_mm_cid_work function is called from a task work item
triggered by a scheduler tick. This can delay the execution of the task
for the entire duration of the function, negatively affecting the
response time of real-time tasks.

This patch runs task_mm_cid_work in a new delayed work bound to the
mm_struct, rather than in the task context right before returning to
userspace.

The delayed work is initialised while allocating the mm and disabled
before freeing it. Its execution is no longer triggered by scheduler
ticks; instead it runs periodically based on the defined
MM_CID_SCAN_DELAY.
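
For illustration only: the pattern behind this is a work item embedded in
the structure it operates on, so the handler can recover its mm from the
work pointer alone, without any task context. Below is a minimal userspace
sketch of that ownership pattern; the names (fake_mm, fake_work, worker)
are invented for the example and are not part of the patch.

/*
 * Minimal userspace sketch of the container_of pattern the patch relies
 * on: the work item is embedded in the owning structure, so the worker
 * recovers that structure from the work pointer alone.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_work { int pending; };

struct fake_mm {
	int users;
	struct fake_work cid_work;	/* embedded, like mm->mm_cid_work */
};

static void worker(struct fake_work *w)
{
	/* Recover the owning mm from the embedded work item. */
	struct fake_mm *mm = container_of(w, struct fake_mm, cid_work);

	printf("worker sees mm with %d users\n", mm->users);
}

int main(void)
{
	struct fake_mm mm = { .users = 1 };

	worker(&mm.cid_work);	/* the workqueue would normally do this */
	return 0;
}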

The main advantage of this change is that the function can be offloaded
to a different CPU and even preempted by RT tasks.

On a busy system, this may mean the function runs less often, but the
current behaviour provides no guarantee either. Moreover, the new
behaviour could be more predictable in some situations, since the
delayed work is always scheduled with the same periodicity for each mm.
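
As a rough sketch of how a fixed per-object period can be obtained with
the standard workqueue API, the toy module below re-arms its own delayed
work from the handler. This is only an illustration under that
assumption, not a claim about the exact flow in this patch; all names
(demo_ctx, demo_scan, DEMO_SCAN_DELAY_MS) are invented for the example.

/*
 * Hedged sketch: a self-rearming delayed work, one common way to get a
 * periodic per-object scan. Toy module, not part of the patch.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_SCAN_DELAY_MS	100	/* stand-in for MM_CID_SCAN_DELAY */

struct demo_ctx {
	struct delayed_work scan_work;	/* embedded, like mm->mm_cid_work */
};

static struct demo_ctx ctx;

static void demo_scan(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_ctx *c = container_of(dwork, struct demo_ctx, scan_work);

	pr_info("demo: periodic scan for ctx %p\n", c);
	/* Re-arm so the scan keeps running with a fixed period. */
	schedule_delayed_work(&c->scan_work,
			      msecs_to_jiffies(DEMO_SCAN_DELAY_MS));
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&ctx.scan_work, demo_scan);
	schedule_delayed_work(&ctx.scan_work,
			      msecs_to_jiffies(DEMO_SCAN_DELAY_MS));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the work cannot run again before the module goes away. */
	cancel_delayed_work_sync(&ctx.scan_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");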

This deprecates the "sched: improve task_mm_cid_work impact on isolated
systems" patches by dropping the first patch and using a workqueue
instead of RCU callbacks.

Signed-off-by: Gabriele Monaco <gmonaco@xxxxxxxxxx>
Link: https://lore.kernel.org/lkml/20241202140735.56368-1-gmonaco@xxxxxxxxxx/
---
 include/linux/mm_types.h |  5 +++++
 kernel/sched/core.c      | 37 +++++++------------------------------
 2 files changed, 12 insertions(+), 30 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7361a8f3ab68..38c567f06dce 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -856,6 +856,7 @@ struct mm_struct {
 		 * mm nr_cpus_allowed updates.
 		 */
 		raw_spinlock_t cpus_allowed_lock;
+		struct delayed_work mm_cid_work;
 #endif
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* size of all page tables */
@@ -1149,6 +1150,8 @@ enum mm_cid_state {
 	MM_CID_LAZY_PUT = (1U << 31),
 };
 
+extern void task_mm_cid_work(struct work_struct *work);
+
 static inline bool mm_cid_is_unset(int cid)
 {
 	return cid == MM_CID_UNSET;
@@ -1221,12 +1224,14 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
 	if (!mm->pcpu_cid)
 		return -ENOMEM;
 	mm_init_cid(mm, p);
+	INIT_DELAYED_WORK(&mm->mm_cid_work, task_mm_cid_work);
 	return 0;
 }
 #define mm_alloc_cid(...)	alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
 
 static inline void mm_destroy_cid(struct mm_struct *mm)
 {
+	disable_delayed_work(&mm->mm_cid_work);
 	free_percpu(mm->pcpu_cid);
 	mm->pcpu_cid = NULL;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95e40895a519..0c3a778c9cb5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5654,7 +5654,6 @@ void sched_tick(void)
 		resched_latency = cpu_resched_latency(rq);
 	calc_global_load_tick(rq);
 	sched_core_tick(rq);
-	task_tick_mm_cid(rq, donor);
 	scx_tick(rq);
 
 	rq_unlock(rq, &rf);
@@ -10520,22 +10519,14 @@ static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
 }
 
-static void task_mm_cid_work(struct callback_head *work)
+void task_mm_cid_work(struct work_struct *work)
 {
 	unsigned long now = jiffies, old_scan, next_scan;
-	struct task_struct *t = current;
 	struct cpumask *cidmask;
-	struct mm_struct *mm;
+	struct delayed_work *delayed_work = container_of(work, struct delayed_work, work);
+	struct mm_struct *mm = container_of(delayed_work, struct mm_struct, mm_cid_work);
 	int weight, cpu;
 
-	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
-
-	work->next = work;	/* Prevent double-add */
-	if (t->flags & PF_EXITING)
-		return;
-	mm = t->mm;
-	if (!mm)
-		return;
 	old_scan = READ_ONCE(mm->mm_cid_next_scan);
 	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
 	if (!old_scan) {
@@ -10571,26 +10562,12 @@ void init_sched_mm_cid(struct task_struct *t)
 
 	if (mm) {
 		mm_users = atomic_read(&mm->mm_users);
-		if (mm_users == 1)
+		if (mm_users == 1) {
 			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
+			schedule_delayed_work(&mm->mm_cid_work,
+					      msecs_to_jiffies(MM_CID_SCAN_DELAY));
+		}
 	}
-	t->cid_work.next = &t->cid_work;	/* Protect against double add */
-	init_task_work(&t->cid_work, task_mm_cid_work);
-}
-
-void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
-{
-	struct callback_head *work = &curr->cid_work;
-	unsigned long now = jiffies;
-
-	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
-	    work->next != work)
-		return;
-	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
-		return;
-
-	/* No page allocation under rq lock */
-	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
 }
 
 void sched_mm_cid_exit_signals(struct task_struct *t)

base-commit: feffde684ac29a3b7aec82d2df850fbdbdee55e4
-- 
2.47.1