On Fri, Feb 14, 2025 at 12:13 PM Juntong Deng <juntong.deng@xxxxxxxxxxx> wrote:
> +static int scx_kfunc_ids_ops_context_filter(const struct bpf_prog *prog, u32 kfunc_id)
> +{
> +	u32 moff, flags;
> +
> +	if (!btf_id_set8_contains(&scx_kfunc_ids_ops_context, kfunc_id))
> +		return 0;
> +
> +	if (prog->type == BPF_PROG_TYPE_SYSCALL &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_unlocked, kfunc_id))
> +		return 0;
> +
> +	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
> +	    prog->aux->st_ops != &bpf_sched_ext_ops)
> +		return 0;
> +
> +	/* prog->type == BPF_PROG_TYPE_STRUCT_OPS && prog->aux->st_ops == &bpf_sched_ext_ops*/
> +
> +	moff = prog->aux->attach_st_ops_member_off;
> +	flags = scx_ops_context_flags[SCX_MOFF_IDX(moff)];
> +
> +	if ((flags & SCX_OPS_KF_UNLOCKED) &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_unlocked, kfunc_id))
> +		return 0;
> +
> +	if ((flags & SCX_OPS_KF_CPU_RELEASE) &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_cpu_release, kfunc_id))
> +		return 0;
> +
> +	if ((flags & SCX_OPS_KF_DISPATCH) &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_dispatch, kfunc_id))
> +		return 0;
> +
> +	if ((flags & SCX_OPS_KF_ENQUEUE) &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_enqueue_dispatch, kfunc_id))
> +		return 0;
> +
> +	if ((flags & SCX_OPS_KF_SELECT_CPU) &&
> +	    btf_id_set8_contains(&scx_kfunc_ids_select_cpu, kfunc_id))
> +		return 0;
> +
> +	return -EACCES;
> +}

This looks great. Very good cleanup and run-time speed-up. Please resend without the RFC tag, so the sched-ext folks can review.