Many scx schedulers define their own concept of scheduling domains to
represent topology characteristics, such as heterogeneous architectures
(e.g., big.LITTLE, P-cores/E-cores), or to categorize tasks based on
specific properties (e.g., setting the soft-affinity of certain tasks to
a subset of CPUs).

Currently, there is no mechanism to share these domains with the
built-in idle CPU selection policy. As a result, schedulers often
implement their own idle CPU selection policies, which are typically
similar to one another, leading to a lot of code duplication.

To address this, introduce the concept of an allowed domain (represented
as a cpumask) that can be used by the BPF schedulers to apply the
built-in idle CPU selection policy to a subset of preferred CPUs.

With this concept, the idle CPU selection policy becomes the following:
 - always prioritize CPUs from fully idle SMT cores (if SMT is enabled),
 - select the same CPU if it's idle and in the allowed domain,
 - select an idle CPU within the same LLC domain, if the LLC domain is a
   subset of the allowed domain,
 - select an idle CPU within the same node, if the node domain is a
   subset of the allowed domain,
 - select an idle CPU within the allowed domain.

If the allowed domain is empty or NULL, the behavior of the built-in
idle CPU selection policy remains unchanged.

This only introduces the core concept of the allowed domain; the
functionality will be exposed through a dedicated kfunc in a separate
patch.

Signed-off-by: Andrea Righi <arighi@xxxxxxxxxx>
---
 kernel/sched/ext.c      |   2 +-
 kernel/sched/ext_idle.c | 128 +++++++++++++++++++++++++++++-----------
 kernel/sched/ext_idle.h |   3 +-
 3 files changed, 97 insertions(+), 36 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 8c9f36baf7dfd..1e9414ffeff01 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3395,7 +3395,7 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
 	} else {
 		s32 cpu;
 
-		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+		cpu = scx_select_cpu_dfl(p, p->cpus_ptr, prev_cpu, wake_flags, 0);
 		if (cpu >= 0) {
 			p->scx.slice = SCX_SLICE_DFL;
 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 4f8a6e46a37a4..9469bf41fd571 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -46,6 +46,11 @@ static struct scx_idle_cpus scx_idle_global_masks;
  */
 static struct scx_idle_cpus **scx_idle_node_masks;
 
+/*
+ * Local per-CPU cpumasks (used to generate temporary idle cpumasks).
+ */
+static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
+
 /*
  * Return the idle masks associated to a target @node.
  *
@@ -391,6 +396,21 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
 		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
 }
 
+static const struct cpumask *
+task_allowed_cpumask(const struct task_struct *p, const struct cpumask *cpus_allowed, s32 prev_cpu)
+{
+	struct cpumask *allowed;
+
+	if (cpus_allowed == p->cpus_ptr || p->nr_cpus_allowed >= num_possible_cpus())
+		return cpus_allowed;
+
+	allowed = this_cpu_cpumask_var_ptr(local_idle_cpumask);
+	if (!cpumask_and(allowed, p->cpus_ptr, cpus_allowed))
+		return NULL;
+
+	return allowed;
+}
+
 /*
  * Built-in CPU idle selection policy:
  *
@@ -403,50 +423,83 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
  *    branch prediction optimizations.
  *
  * 3. Pick a CPU within the same LLC (Last-Level Cache):
- *    - if the above conditions aren't met, pick a CPU that shares the same LLC
- *      to maintain cache locality.
+ *    - if the above conditions aren't met, pick a CPU that shares the same
+ *      LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
+ *      cache locality.
  *
  * 4. Pick a CPU within the same NUMA node, if enabled:
- *    - choose a CPU from the same NUMA node to reduce memory access latency.
+ *    - choose a CPU from the same NUMA node, if the node domain is a subset
+ *      of @cpus_allowed, to reduce memory access latency.
+ *
+ * 5. Pick any idle CPU within the @cpus_allowed domain.
  *
- * 5. Pick any idle CPU usable by the task.
+ * If @cpus_allowed is NULL, the task is allowed to run on any CPU.
  *
  * Step 3 and 4 are performed only if the system has, respectively, multiple
  * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
- * scx_selcpu_topo_numa).
+ * scx_selcpu_topo_numa) and their domains don't overlap.
+ *
+ * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
+ * begin in @prev_cpu's node and proceed to other nodes in order of
+ * increasing distance.
+ *
+ * Return the picked CPU if idle, or a negative value otherwise.
  *
  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
  * we never call ops.select_cpu() for them, see select_task_rq().
  */
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
+s32 scx_select_cpu_dfl(struct task_struct *p, const struct cpumask *cpus_allowed,
+		       s32 prev_cpu, u64 wake_flags, u64 flags)
 {
-	const struct cpumask *llc_cpus = NULL;
-	const struct cpumask *numa_cpus = NULL;
-	int node = scx_cpu_node_if_enabled(prev_cpu);
+	const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
+	const struct cpumask *allowed;
+	int node;
 	s32 cpu;
 
+	preempt_disable();
+
+	/*
+	 * Determine the allowed scheduling domain of the task.
+	 */
+	allowed = task_allowed_cpumask(p, cpus_allowed, prev_cpu);
+	if (!allowed) {
+		cpu = -EBUSY;
+		goto out_enable;
+	}
+
+	/*
+	 * If @prev_cpu is not in the allowed domain, try to assign a new
+	 * arbitrary CPU in the allowed domain.
+	 */
+	if (!cpumask_test_cpu(prev_cpu, allowed)) {
+		cpu = cpumask_any_and_distribute(p->cpus_ptr, allowed);
+		if (cpu < nr_cpu_ids)
+			prev_cpu = cpu;
+	}
+	node = scx_cpu_node_if_enabled(prev_cpu);
+
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
 	rcu_read_lock();
 
 	/*
-	 * Determine the scheduling domain only if the task is allowed to run
-	 * on all CPUs.
-	 *
-	 * This is done primarily for efficiency, as it avoids the overhead of
-	 * updating a cpumask every time we need to select an idle CPU (which
-	 * can be costly in large SMP systems), but it also aligns logically:
-	 * if a task's scheduling domain is restricted by user-space (through
-	 * CPU affinity), the task will simply use the flat scheduling domain
-	 * defined by user-space.
+	 * Consider node/LLC scheduling domains only if the allowed cpumask
+	 * contains all the CPUs of each particular domain and if the
+	 * domains don't overlap.
 	 */
-	if (p->nr_cpus_allowed >= num_possible_cpus()) {
-		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
-			numa_cpus = numa_span(prev_cpu);
+	if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
+		const struct cpumask *cpus = numa_span(prev_cpu);
+
+		if (cpus && !cpumask_equal(cpus, allowed) && cpumask_subset(cpus, allowed))
+			numa_cpus = cpus;
+	}
+
+	if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
+		const struct cpumask *cpus = llc_span(prev_cpu);
 
-		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
-			llc_cpus = llc_span(prev_cpu);
+		if (cpus && !cpumask_equal(cpus, allowed) && cpumask_subset(cpus, allowed))
+			llc_cpus = cpus;
 	}
 
 	/*
@@ -484,7 +537,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
-			if (cpumask_test_cpu(cpu, p->cpus_ptr))
+			if (cpumask_test_cpu(cpu, allowed))
 				goto out_unlock;
 		}
 	}
@@ -529,7 +582,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		 * begin in prev_cpu's node and proceed to other nodes in
 		 * order of increasing distance.
 		 */
-		cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
+		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
 			goto out_unlock;
 
@@ -577,12 +630,14 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 * in prev_cpu's node and proceed to other nodes in order of
 	 * increasing distance.
 	 */
-	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
+	cpu = scx_pick_idle_cpu(allowed, node, flags);
 	if (cpu >= 0)
 		goto out_unlock;
 
 out_unlock:
 	rcu_read_unlock();
+out_enable:
+	preempt_enable();
 
 	return cpu;
 }
@@ -592,7 +647,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
  */
 void scx_idle_init_masks(void)
 {
-	int node;
+	int i;
 
 	/* Allocate global idle cpumasks */
 	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
@@ -603,14 +658,19 @@ void scx_idle_init_masks(void)
 					  sizeof(*scx_idle_node_masks), GFP_KERNEL);
 	BUG_ON(!scx_idle_node_masks);
 
-	for_each_node(node) {
-		scx_idle_node_masks[node] = kzalloc_node(sizeof(**scx_idle_node_masks),
-							 GFP_KERNEL, node);
-		BUG_ON(!scx_idle_node_masks[node]);
+	for_each_node(i) {
+		scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
+						      GFP_KERNEL, i);
+		BUG_ON(!scx_idle_node_masks[i]);
 
-		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu, GFP_KERNEL, node));
-		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt, GFP_KERNEL, node));
+		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
+		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
 	}
+
+	/* Allocate local per-cpu idle cpumasks */
+	for_each_possible_cpu(i)
+		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
+					       GFP_KERNEL, cpu_to_node(i)));
 }
 
 static void update_builtin_idle(int cpu, bool idle)
@@ -825,7 +885,7 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 		goto prev_cpu;
 
 #ifdef CONFIG_SMP
-	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+	cpu = scx_select_cpu_dfl(p, p->cpus_ptr, prev_cpu, wake_flags, 0);
 	if (cpu >= 0) {
 		*is_idle = true;
 		return cpu;
diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h
index 511cc2221f7a8..977f49905f2c7 100644
--- a/kernel/sched/ext_idle.h
+++ b/kernel/sched/ext_idle.h
@@ -27,7 +27,8 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node
 }
 #endif /* CONFIG_SMP */
 
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags);
+s32 scx_select_cpu_dfl(struct task_struct *p, const struct cpumask *cpus_allowed,
+		       s32 prev_cpu, u64 wake_flags, u64 flags);
 void scx_idle_enable(struct sched_ext_ops *ops);
 void scx_idle_disable(void);
 int scx_idle_init(void);
-- 
2.48.1
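
For reference, below is an illustrative sketch (not part of the patch) of how
an in-kernel caller could use the new @cpus_allowed argument of
scx_select_cpu_dfl() to restrict the built-in idle CPU selection policy to a
preferred subset of CPUs. The cpumask parameter "perf_cores" and the wrapper
pick_idle_perf_core() are hypothetical names invented for the example; only
scx_select_cpu_dfl() and its arguments come from this patch.

/*
 * Illustrative sketch only: try to pick an idle CPU from a preferred subset
 * (e.g. the P-cores of a hybrid system), then fall back to the task's full
 * affinity mask. "perf_cores" and "pick_idle_perf_core" are hypothetical.
 */
static s32 pick_idle_perf_core(struct task_struct *p, const struct cpumask *perf_cores,
			       s32 prev_cpu, u64 wake_flags)
{
	s32 cpu;

	/*
	 * Passing a cpumask narrower than p->cpus_ptr makes the built-in
	 * policy consider the SMT/LLC/NUMA steps only within that subset;
	 * a negative return value means no idle CPU was found there.
	 */
	cpu = scx_select_cpu_dfl(p, perf_cores, prev_cpu, wake_flags, 0);
	if (cpu >= 0)
		return cpu;

	/* Fall back to the full per-task affinity mask. */
	return scx_select_cpu_dfl(p, p->cpus_ptr, prev_cpu, wake_flags, 0);
}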