6.12-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Libo Chen <libo.chen@xxxxxxxxxx>

This reverts commit fc20e87419e59d86f3bbcee2d4506bcd01c6450a which is
commit ef7009decc30eb2515a64253791d61b72229c119 upstream.

Commit "selftests/sched_ext: fix build after renames in sched_ext API"
was pulled into 6.12.y without the sched_ext API renames it depends on.
The prereqs can be found in
https://lore.kernel.org/lkml/20241110200308.103681-1-tj@xxxxxxxxxx/
As a result, sched_ext selftests fail to compile.

Cc: stable@xxxxxxxxxxxxxxx
Fixes: fc20e87419e59 ("selftests/sched_ext: fix build after renames in sched_ext API")
Signed-off-by: Libo Chen <libo.chen@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c         | 2 +-
 tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c        | 4 ++--
 tools/testing/selftests/sched_ext/dsp_local_on.bpf.c                | 2 +-
 tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c        | 2 +-
 tools/testing/selftests/sched_ext/exit.bpf.c                        | 4 ++--
 tools/testing/selftests/sched_ext/maximal.bpf.c                     | 4 ++--
 tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c              | 2 +-
 tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c   | 2 +-
 tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c         | 2 +-
 tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c | 2 +-
 tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c | 4 ++--
 tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c            | 8 ++++----
 12 files changed, 19 insertions(+), 19 deletions(-)

--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_s
 	 * If we dispatch to a bogus DSQ that will fall back to the
 	 * builtin global DSQ, we fail gracefully.
 	 */
-	scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+	scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
 				 p->scx.dsq_vtime, 0);
 	return cpu;
 }
--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_
 	if (cpu >= 0) {
 		/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
-		scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
-					 p->scx.dsq_vtime, 0);
+		scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+				       p->scx.dsq_vtime, 0);
 		return cpu;
 	}
--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -48,7 +48,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatc
 	else
 		target = scx_bpf_task_cpu(p);
 
-	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
 	bpf_task_release(p);
 }
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails
 	/* Can only call from ops.select_cpu() */
 	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
 
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct
 	if (exit_point == EXIT_ENQUEUE)
 		EXIT_CLEANLY();
 
-	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 c
 	if (exit_point == EXIT_DISPATCH)
 		EXIT_CLEANLY();
 
-	scx_bpf_dsq_move_to_local(DSQ_ID);
+	scx_bpf_consume(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -22,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, s
 
 void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
 {
-	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, str
 
 void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
 {
-	scx_bpf_dsq_move_to_local(DSQ_ID);
+	scx_bpf_consume(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enque
 	}
 	scx_bpf_put_idle_cpumask(idle_mask);
 
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodis
 		saw_local = true;
 	}
 
-	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
 }
 
 s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_s
 	cpu = prev_cpu;
 
 dispatch:
-	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
+	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
 	return cpu;
 }
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_b
 		   s32 prev_cpu, u64 wake_flags)
 {
 	/* Dispatching to a random DSQ should fail. */
-	scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+	scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
 
 	return prev_cpu;
 }
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_d
 		   s32 prev_cpu, u64 wake_flags)
 {
 	/* Dispatching twice in a row is disallowed. */
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 
 	return prev_cpu;
 }
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -2,8 +2,8 @@
 /*
  * A scheduler that validates that enqueue flags are properly stored and
  * applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
- * and making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
+ * making the test a very basic vtime scheduler.
  *
  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
  * Copyright (c) 2024 David Vernet <dvernet@xxxxxxxx>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_sele
 	cpu = prev_cpu;
 	scx_bpf_test_and_clear_cpu_idle(cpu);
 ddsp:
-	scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+	scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
 	return cpu;
 }
 
 void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
 {
-	if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
+	if (scx_bpf_consume(VTIME_DSQ))
 		consumed = true;
 }
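For anyone cross-checking the revert: the rename series linked above replaced
three dispatch kfuncs, and this revert switches the selftests back to the
pre-rename names that the 6.12.y kernel still exports. Below is a minimal
sketch of the pre-rename API in the style of these selftests. The example_*
names and the skeleton are illustrative only, not part of the patch; the kfunc
names and the old/new mapping come straight from the diff above.

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define DSQ_ID 0

/*
 * Pre-rename kfuncs used on 6.12.y (restored by this revert) and their
 * renamed upstream equivalents:
 *
 *   scx_bpf_dispatch()       -> scx_bpf_dsq_insert()
 *   scx_bpf_dispatch_vtime() -> scx_bpf_dsq_insert_vtime()
 *   scx_bpf_consume()        -> scx_bpf_dsq_move_to_local()
 */

s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
{
	/* create the custom DSQ that enqueue/dispatch below use */
	return scx_bpf_create_dsq(DSQ_ID, -1);
}

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* old name; compiles on 6.12.y, fails against the renamed API */
	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
{
	/* old name for moving a queued task to the local DSQ */
	scx_bpf_consume(DSQ_ID);
}

SEC(".struct_ops.link")
struct sched_ext_ops example_ops = {
	.init		= (void *)example_init,
	.enqueue	= (void *)example_enqueue,
	.dispatch	= (void *)example_dispatch,
	.name		= "example",
};

Building this against a 6.12.y tree works only with the old names, which is
exactly why the renamed selftests failed to compile there.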