Add ops.cpu_online/offline() which are invoked when CPUs come online and
offline respectively. As the enqueue path already automatically bypasses
tasks to the local dsq on a deactivated CPU, BPF schedulers are
guaranteed to see tasks only on CPUs which are between online() and
offline().

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
---
 include/linux/sched/ext.h | 18 ++++++++++++++++++
 kernel/sched/ext.c        | 18 +++++++++++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 8a275ec05ee1..c17957bd75df 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -377,6 +377,24 @@ struct sched_ext_ops {
 	 */
 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
 
+	/**
+	 * cpu_online - A CPU became online
+	 * @cpu: CPU which just came up
+	 *
+	 * @cpu just came online. @cpu doesn't call ops.enqueue() or run tasks
+	 * associated with other CPUs beforehand.
+	 */
+	void (*cpu_online)(s32 cpu);
+
+	/**
+	 * cpu_offline - A CPU is going offline
+	 * @cpu: CPU which is going offline
+	 *
+	 * @cpu is going offline. @cpu doesn't call ops.enqueue() or run tasks
+	 * associated with other CPUs afterwards.
+	 */
+	void (*cpu_offline)(s32 cpu);
+
 	/**
 	 * prep_enable - Prepare to enable BPF scheduling for a task
 	 * @p: task to prepare BPF scheduling for
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 1b83dddbdf10..e54d8c7d19a9 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1392,7 +1392,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
 		 * emitted in scx_notify_pick_next_task().
 		 */
 		if (SCX_HAS_OP(cpu_acquire))
-			SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
+			SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq),
+				    NULL);
 		rq->scx.cpu_released = false;
 	}
 
@@ -1852,6 +1853,18 @@ void __scx_update_idle(struct rq *rq, bool idle)
 #endif
 }
 
+static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason)
+{
+	if (SCX_HAS_OP(cpu_online) && reason == RQ_ONOFF_HOTPLUG)
+		SCX_CALL_OP(SCX_KF_REST, cpu_online, cpu_of(rq));
+}
+
+static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason)
+{
+	if (SCX_HAS_OP(cpu_offline) && reason == RQ_ONOFF_HOTPLUG)
+		SCX_CALL_OP(SCX_KF_REST, cpu_offline, cpu_of(rq));
+}
+
 #else	/* !CONFIG_SMP */
 
 static bool test_and_clear_cpu_idle(int cpu) { return false; }
@@ -2370,6 +2383,9 @@ DEFINE_SCHED_CLASS(ext) = {
 	.balance		= balance_scx,
 	.select_task_rq		= select_task_rq_scx,
 	.set_cpus_allowed	= set_cpus_allowed_scx,
+
+	.rq_online		= rq_online_scx,
+	.rq_offline		= rq_offline_scx,
 #endif
 
 	.task_tick		= task_tick_scx,
-- 
2.41.0
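
For illustration only (not part of this patch): a BPF scheduler could hook
the new callbacks roughly as below. This is a minimal sketch assuming the
scx_common.bpf.h header and the BPF_STRUCT_OPS convenience macro shipped
with the in-tree sched_ext example schedulers; the "hotplug_demo" name and
the callback bodies are hypothetical.

	/* SPDX-License-Identifier: GPL-2.0 */
	#include "scx_common.bpf.h"

	char _license[] SEC("license") = "GPL";

	void BPF_STRUCT_OPS(hotplug_cpu_online, s32 cpu)
	{
		/*
		 * @cpu is now usable. It hasn't called ops.enqueue() or run
		 * tasks associated with other CPUs before this point, so the
		 * scheduler can safely start dispatching to it, e.g. by
		 * setting it in a bitmap of schedulable CPUs.
		 */
		bpf_printk("cpu %d came online", cpu);
	}

	void BPF_STRUCT_OPS(hotplug_cpu_offline, s32 cpu)
	{
		/*
		 * @cpu is going away. It won't call ops.enqueue() or run
		 * tasks associated with other CPUs after this point, so the
		 * scheduler should stop dispatching to it.
		 */
		bpf_printk("cpu %d going offline", cpu);
	}

	SEC(".struct_ops")
	struct sched_ext_ops hotplug_demo_ops = {
		.cpu_online	= (void *)hotplug_cpu_online,
		.cpu_offline	= (void *)hotplug_cpu_offline,
		.name		= "hotplug_demo",
	};

Because the enqueue path bypasses deactivated CPUs as described above, such
a scheduler never has to handle tasks stranded on a CPU outside the
online()/offline() window.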