On Wed, Jul 07, 2021 at 11:47:47PM +0200, Jiri Olsa wrote:
> +static bool allow_get_func_ip_tracing(struct bpf_verifier_env *env)
> +{
> +	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);

Why does it have to be gated by 'jited && x86_64'?
It's gated by the bpf trampoline, and the trampoline is only implemented
on x86_64 so far. The trampoline has plenty of features; I would expect
the bpf trampoline for arm64 to implement all of them. If not, func_ip
would be just one of the trampoline features that couldn't be
implemented, and at that point we'd need a feature mask of some sort,
but I'd rather push for feature equivalence between trampoline
implementations.

The jited part also doesn't seem to be necessary.
The trampoline passes a pointer to the stack in R1.
The interpreter should handle the
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8) insn the same way, and it
should work, since the trampoline prepared that stack slot.
What did I miss?
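To spell out the interpreter argument, here is a sketch of the
trampoline-prepared stack and what the single patched insn does (the
layout picture and the helper name are illustrative, not the in-tree
code):

	/*
	 * trampoline-prepared stack, conceptually:
	 *
	 *	[ traced func ip ]  <- ctx - 8
	 *	[ saved args ... ]  <- ctx (passed to the prog in R1)
	 *
	 * so the patched insn
	 *
	 *	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8)
	 *
	 * is simply R0 = *(u64 *)(ctx - 8). Equivalent C, as a
	 * hypothetical helper:
	 */
	static u64 get_func_ip_sketch(void *ctx)
	{
		return *(u64 *)((u8 *)ctx - 8);
	}

The interpreter executes that load exactly as the JIT would compile it,
so the load depends only on the trampoline having stored the ip, not on
how the prog is executed.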
> +static int has_get_func_ip(struct bpf_verifier_env *env)
> +{
> +	enum bpf_attach_type eatype = env->prog->expected_attach_type;
> +	enum bpf_prog_type type = resolve_prog_type(env->prog);
> +	int func_id = BPF_FUNC_get_func_ip;
> +
> +	if (type == BPF_PROG_TYPE_TRACING) {
> +		if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
> +		    eatype != BPF_MODIFY_RETURN) {
> +			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
> +				func_id_name(func_id), func_id);
> +			return -ENOTSUPP;
> +		}
> +		if (!allow_get_func_ip_tracing(env)) {
> +			verbose(env, "func %s#%d for tracing programs supported only for JITed x86_64\n",
> +				func_id_name(func_id), func_id);
> +			return -ENOTSUPP;
> +		}
> +		return 0;
> +	}
> +
> +	verbose(env, "func %s#%d not supported for program type %d\n",
> +		func_id_name(func_id), func_id, type);
> +	return -ENOTSUPP;
> +}
> +
>  static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  			     int *insn_idx_p)
>  {
> @@ -6225,6 +6256,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
>  	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
>  		env->prog->call_get_stack = true;
>
> +	if (func_id == BPF_FUNC_get_func_ip) {
> +		if (has_get_func_ip(env))
> +			return -ENOTSUPP;
> +		env->prog->call_get_func_ip = true;
> +	}
> +
>  	if (changes_data)
>  		clear_all_pkt_pointers(env);
>  	return 0;
> @@ -12369,6 +12406,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>  {
>  	struct bpf_prog *prog = env->prog;
>  	bool expect_blinding = bpf_jit_blinding_enabled(prog);
> +	enum bpf_prog_type prog_type = resolve_prog_type(prog);
>  	struct bpf_insn *insn = prog->insnsi;
>  	const struct bpf_func_proto *fn;
>  	const int insn_cnt = prog->len;
> @@ -12702,6 +12740,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>  			continue;
>  		}
>
> +		/* Implement bpf_get_func_ip inline. */
> +		if (prog_type == BPF_PROG_TYPE_TRACING &&
> +		    insn->imm == BPF_FUNC_get_func_ip) {
> +			/* Load IP address from ctx - 8 */
> +			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
> +
> +			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
> +			if (!new_prog)
> +				return -ENOMEM;
> +
> +			env->prog = prog = new_prog;
> +			insn = new_prog->insnsi + i + delta;
> +			continue;
> +		}
> +
>  patch_call_imm:
>  		fn = env->ops->get_func_proto(insn->imm, env->prog);
>  		/* all functions that have prototype and verifier allowed
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 64bd2d84367f..9edd3b1a00ad 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -948,6 +948,19 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
>  	.arg5_type	= ARG_ANYTHING,
>  };
>
> +BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
> +{
> +	/* Stub, the helper call is inlined in the program. */
> +	return 0;
> +}

Maybe add a WARN in here that it should never be executed?
Or maybe add an actual implementation:

	return ((u64 *)ctx)[-1];

and check that it works without inlining by the verifier?
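For instance, folding both suggestions together (an untested sketch;
WARN_ON_ONCE is one way to flag the "should have been inlined" case
while still returning the real value):

	BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
	{
		/* Normally inlined by the verifier; if we do get here,
		 * the trampoline still stored the traced function's ip
		 * at ctx - 8, so return it anyway.
		 */
		WARN_ON_ONCE(1);
		return ((u64 *)ctx)[-1];
	}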