It's often useful to wake up and/or trigger reschedule on other CPUs. This
patch adds the scx_bpf_kick_cpu() kfunc helper that the BPF scheduler can
call to kick the target CPU into the scheduling path.

As a sched_ext task relinquishes its CPU only after its slice is depleted,
this patch also adds SCX_KICK_PREEMPT and SCX_ENQ_PREEMPT, which clear the
slice of the target CPU's current task to guarantee that sched_ext's
scheduling path runs on the CPU.

v4: * Move example scheduler to its own patch.

v3: * Make scx_example_central switch all tasks by default.
    * Convert to BPF inline iterators.

v2: * Julia Lawall reported that scx_example_central can overflow the
      dispatch buffer and malfunction. As scheduling for other CPUs can't be
      handled by the automatic retry mechanism, fix by implementing explicit
      overflow and retry handling.
    * Updated to use generic BPF cpumask helpers.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
---
 include/linux/sched/ext.h        |  4 ++
 kernel/sched/ext.c               | 81 ++++++++++++++++++++++++++++++--
 kernel/sched/ext.h               | 12 +++++
 kernel/sched/sched.h             |  3 ++
 tools/sched_ext/scx_common.bpf.h |  1 +
 5 files changed, 98 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 55f649bd065c..d6ebfa6163a1 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -408,6 +408,10 @@ struct sched_ext_entity {
 	 * scx_bpf_dispatch() but can also be modified directly by the BPF
 	 * scheduler. Automatically decreased by SCX as the task executes. On
 	 * depletion, a scheduling event is triggered.
+	 *
+	 * This value is cleared to zero if the task is preempted by
+	 * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+	 * task ran. Use p->se.sum_exec_runtime instead.
 	 */
 	u64			slice;

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 65ee99ea111b..c18a67791bc7 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -507,7 +507,7 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 		}
 	}
 
-	if (enq_flags & SCX_ENQ_HEAD)
+	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
 		list_add(&p->scx.dsq_node, &dsq->fifo);
 	else
 		list_add_tail(&p->scx.dsq_node, &dsq->fifo);
@@ -523,8 +523,16 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 
 	if (is_local) {
 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+		bool preempt = false;
 
-		if (sched_class_above(&ext_sched_class, rq->curr->sched_class))
+		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+		    rq->curr->sched_class == &ext_sched_class) {
+			rq->curr->scx.slice = 0;
+			preempt = true;
+		}
+
+		if (preempt || sched_class_above(&ext_sched_class,
+						 rq->curr->sched_class))
 			resched_curr(rq);
 	} else {
 		raw_spin_unlock(&dsq->lock);
@@ -1941,7 +1949,8 @@ int scx_check_setscheduler(struct task_struct *p, int policy)
  * Omitted operations:
  *
  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
- *   isn't tied to the CPU at that point.
+ *   isn't tied to the CPU at that point. Preemption is implemented by resetting
+ *   the victim task's slice to 0 and triggering reschedule on the target CPU.
  *
  * - migrate_task_rq: Unncessary as task to cpu mapping is transient.
  *
@@ -2787,6 +2796,32 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
 };
 
+static void kick_cpus_irq_workfn(struct irq_work *irq_work)
+{
+	struct rq *this_rq = this_rq();
+	int this_cpu = cpu_of(this_rq);
+	int cpu;
+
+	for_each_cpu(cpu, this_rq->scx.cpus_to_kick) {
+		struct rq *rq = cpu_rq(cpu);
+		unsigned long flags;
+
+		raw_spin_rq_lock_irqsave(rq, flags);
+
+		if (cpu_online(cpu) || cpu == this_cpu) {
+			if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) &&
+			    rq->curr->sched_class == &ext_sched_class)
+				rq->curr->scx.slice = 0;
+			resched_curr(rq);
+		}
+
+		raw_spin_rq_unlock_irqrestore(rq, flags);
+	}
+
+	cpumask_clear(this_rq->scx.cpus_to_kick);
+	cpumask_clear(this_rq->scx.cpus_to_preempt);
+}
+
 /**
  * print_scx_info - print out sched_ext scheduler state
  * @log_lvl: the log level to use when printing
@@ -2855,6 +2890,10 @@ void __init init_sched_ext_class(void)
 
 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
 		INIT_LIST_HEAD(&rq->scx.watchdog_list);
+
+		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
+		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
+		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
 	}
 
 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
@@ -3089,6 +3128,41 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
 	.set			= &scx_kfunc_ids_dispatch,
 };
 
+/**
+ * scx_bpf_kick_cpu - Trigger reschedule on a CPU
+ * @cpu: cpu to kick
+ * @flags: %SCX_KICK_* flags
+ *
+ * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
+ * trigger rescheduling on a busy CPU. This can be called from any online
+ * scx_ops operation and the actual kicking is performed asynchronously through
+ * an irq work.
+ */
+void scx_bpf_kick_cpu(s32 cpu, u64 flags)
+{
+	struct rq *rq;
+
+	if (!ops_cpu_valid(cpu)) {
+		scx_ops_error("invalid cpu %d", cpu);
+		return;
+	}
+
+	preempt_disable();
+	rq = this_rq();
+
+	/*
+	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
+	 * rq locks. We can probably be smarter and avoid bouncing if called
+	 * from ops which don't hold a rq lock.
+	 */
+	cpumask_set_cpu(cpu, rq->scx.cpus_to_kick);
+	if (flags & SCX_KICK_PREEMPT)
+		cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt);
+
+	irq_work_queue(&rq->scx.kick_cpus_irq_work);
+	preempt_enable();
+}
+
 /**
  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
  * @dsq_id: id of the DSQ
@@ -3353,6 +3427,7 @@ s32 scx_bpf_task_cpu(const struct task_struct *p)
 }
 
 BTF_SET8_START(scx_kfunc_ids_ops_only)
+BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 1cdef69a6855..d246e5c2d3c7 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -19,6 +19,14 @@ enum scx_enq_flags {
 
 	/* high 32bits are SCX specific */
 
+	/*
+	 * Set the following to trigger preemption when calling
+	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
+	 * current task is cleared to zero and the CPU is kicked into the
+	 * scheduling path. Implies %SCX_ENQ_HEAD.
+	 */
+	SCX_ENQ_PREEMPT		= 1LLU << 32,
+
 	/*
 	 * The task being enqueued is the only task available for the cpu. By
	 * default, ext core keeps executing such tasks but when
@@ -55,6 +63,10 @@ enum scx_pick_idle_cpu_flags {
 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
 };
 
+enum scx_kick_flags {
+	SCX_KICK_PREEMPT	= 1LLU << 0,	/* force scheduling on the CPU */
+};
+
 #ifdef CONFIG_SCHED_CLASS_EXT
 
 struct sched_enq_and_set_ctx {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aec09e99cdb0..fc8e23f94e0a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -679,6 +679,9 @@ struct scx_rq {
 	unsigned long		ops_qseq;
 	u64			extra_enq_flags;	/* see move_task_to_local_dsq() */
 	u32			nr_running;
+	cpumask_var_t		cpus_to_kick;
+	cpumask_var_t		cpus_to_preempt;
+	struct irq_work		kick_cpus_irq_work;
 };
 #endif /* CONFIG_SCHED_CLASS_EXT */
 
diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h
index 81e484defd9b..590c84ac602d 100644
--- a/tools/sched_ext/scx_common.bpf.h
+++ b/tools/sched_ext/scx_common.bpf.h
@@ -58,6 +58,7 @@ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
 bool scx_bpf_consume(u64 dsq_id) __ksym;
 u32 scx_bpf_dispatch_nr_slots(void) __ksym;
 void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym;
+void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
 s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
 bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
 s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
-- 
2.42.0
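
As a usage illustration (not part of the patch), here is a minimal sketch of
how a BPF scheduler might drive the new kfunc from its enqueue path. The
callback name and the policy it implements are hypothetical;
scx_bpf_dispatch(), scx_bpf_task_cpu(), the BPF_STRUCT_OPS() macro and the
SCX_DSQ_GLOBAL/SCX_SLICE_DFL constants are the existing definitions from
scx_common.bpf.h and the sched_ext headers:

#include "scx_common.bpf.h"

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* hypothetical policy: queue @p globally, then preempt its last CPU */
	s32 cpu = scx_bpf_task_cpu(p);

	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

	/*
	 * SCX_KICK_PREEMPT clears the slice of @cpu's current task so the
	 * scheduling path is guaranteed to run there; a kick without the
	 * flag still triggers resched but leaves the current slice intact.
	 */
	scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
}

Because the actual kicking is bounced through irq work, this is safe to call
from ops which hold a rq lock.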
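
SCX_ENQ_PREEMPT can achieve a similar effect at dispatch time. Another sketch
with a hypothetical callback name: dispatching to a local DSQ with the flag
set queues the task at the head (%SCX_ENQ_HEAD is implied), zeroes the
current task's slice and kicks the CPU into the scheduling path:

void BPF_STRUCT_OPS(example_urgent_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * Per dispatch_enqueue() above, this preempts whatever sched_ext
	 * task is currently running on the target CPU in favor of @p.
	 */
	scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
			 enq_flags | SCX_ENQ_PREEMPT);
}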