On Tue, Sep 28, 2021 at 04:09:45PM -0700, Kees Cook wrote:
> In order to keep ahead of cases in the kernel where Control Flow
> Integrity (CFI) may trip over function call casts, enabling
> -Wcast-function-type is helpful. To that end, BPF_CAST_CALL causes
> various warnings and is one of the last places in the kernel triggering
> this warning.
> 
> Most places using BPF_CAST_CALL actually just want a void * to perform
> math on. It's not actually performing a call, so just use a different
> helper to get the void *, by way of the new BPF_CALL_IMM() helper, which
> can clean up a common copy/paste idiom as well.
> 
> This change results in no object code difference.
> 
> Cc: Alexei Starovoitov <ast@xxxxxxxxxx>
> Cc: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
> Cc: Martin KaFai Lau <kafai@xxxxxx>
> Cc: Song Liu <songliubraving@xxxxxx>
> Cc: Yonghong Song <yhs@xxxxxx>
> Cc: John Fastabend <john.fastabend@xxxxxxxxx>
> Cc: KP Singh <kpsingh@xxxxxxxxxx>
> Cc: netdev@xxxxxxxxxxxxxxx
> Cc: bpf@xxxxxxxxxxxxxxx
> Cc: Gustavo A. R. Silva <gustavoars@xxxxxxxxxx>
> Link: https://github.com/KSPP/linux/issues/20
> Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
> Acked-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> Link: https://lore.kernel.org/lkml/CAEf4Bzb46=-J5Fxc3mMZ8JQPtK1uoE0q6+g6WPz53Cvx=CBEhw@xxxxxxxxxxxxxx

Reviewed-by: Gustavo A. R. Silva <gustavoars@xxxxxxxxxx>

Thanks
--
Gustavo

> ---
>  include/linux/filter.h |  6 +++++-
>  kernel/bpf/hashtab.c   |  6 +++---
>  kernel/bpf/verifier.c  | 26 +++++++++-----------------
>  lib/test_bpf.c         |  2 +-
>  4 files changed, 18 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index 4a93c12543ee..6c247663d4ce 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -365,13 +365,17 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
>  #define BPF_CAST_CALL(x)					\
>  		((u64 (*)(u64, u64, u64, u64, u64))(x))
>  
> +/* Convert function address to BPF immediate */
> +
> +#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)
> +
>  #define BPF_EMIT_CALL(FUNC)					\
>  	((struct bpf_insn) {					\
>  		.code  = BPF_JMP | BPF_CALL,			\
>  		.dst_reg = 0,					\
>  		.src_reg = 0,					\
>  		.off   = 0,					\
> -		.imm   = ((FUNC) - __bpf_call_base) })
> +		.imm   = BPF_CALL_IMM(FUNC) })
>  
>  /* Raw code statement block */
>  
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 32471ba02708..3d8f9d6997d5 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
>  
>  	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
>  		     (void *(*)(struct bpf_map *map, void *key))NULL));
> -	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
> +	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
>  	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
>  	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
>  				offsetof(struct htab_elem, key) +
> @@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
>  
>  	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
>  		     (void *(*)(struct bpf_map *map, void *key))NULL));
> -	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
> +	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
>  	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
>  	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
>  			      offsetof(struct htab_elem, lru_node) +
> @@ -2397,7 +2397,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map,
>  
>  	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
>  		     (void *(*)(struct bpf_map *map, void *key))NULL));
> -	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
> +	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
>  	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
>  	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
>  				offsetof(struct htab_elem, key) +
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 7a8351604f67..1433752db740 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -1744,7 +1744,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
>  
>  	desc = &tab->descs[tab->nr_descs++];
>  	desc->func_id = func_id;
> -	desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base;
> +	desc->imm = BPF_CALL_IMM(addr);
>  	err = btf_distill_func_proto(&env->log, btf_vmlinux,
>  				     func_proto, func_name,
>  				     &desc->func_model);
> @@ -12514,8 +12514,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
>  		if (!bpf_pseudo_call(insn))
>  			continue;
>  		subprog = insn->off;
> -		insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
> -			    __bpf_call_base;
> +		insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
>  	}
>  
>  	/* we use the aux data to keep a list of the start addresses
> @@ -12995,32 +12994,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>  patch_map_ops_generic:
>  			switch (insn->imm) {
>  			case BPF_FUNC_map_lookup_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
>  				continue;
>  			case BPF_FUNC_map_update_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
>  				continue;
>  			case BPF_FUNC_map_delete_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
>  				continue;
>  			case BPF_FUNC_map_push_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
>  				continue;
>  			case BPF_FUNC_map_pop_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
>  				continue;
>  			case BPF_FUNC_map_peek_elem:
> -				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
>  				continue;
>  			case BPF_FUNC_redirect_map:
> -				insn->imm = BPF_CAST_CALL(ops->map_redirect) -
> -					    __bpf_call_base;
> +				insn->imm = BPF_CALL_IMM(ops->map_redirect);
>  				continue;
>  			}
>  
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index 08f438e6fe9e..21ea1ab253a1 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -12439,7 +12439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
>  				err = -EFAULT;
>  				goto out_err;
>  			}
> -			*insn = BPF_EMIT_CALL(BPF_CAST_CALL(addr));
> +			*insn = BPF_EMIT_CALL(addr);
>  			if ((long)__bpf_call_base + insn->imm != addr)
>  				*insn = BPF_JMP_A(0); /* Skip: NOP */
>  			break;
> -- 
> 2.30.2
> 
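As a side note for anyone reading along: the new macro is pure pointer
arithmetic, nothing is ever called through the cast. Below is a quick
stand-alone sketch of what BPF_CALL_IMM() computes, using dummy stand-in
functions rather than the real __bpf_call_base and a real BPF helper (so
it is only an illustration, not kernel code); like the kernel macros it
relies on the GNU C extension that allows arithmetic on void * and on
function pointers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel symbols; only the arithmetic matters here. */
static uint64_t __bpf_call_base(uint64_t a, uint64_t b, uint64_t c,
				uint64_t d, uint64_t e)
{
	return 0;
}

static uint64_t dummy_helper(uint64_t a, uint64_t b, uint64_t c,
			     uint64_t d, uint64_t e)
{
	return a + b + c + d + e;
}

/* Old idiom: cast to a call-style function pointer just to do math on it. */
#define BPF_CAST_CALL(x) \
	((uint64_t (*)(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t))(x))

/* New idiom from the patch: go through void * and keep the byte offset. */
#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)

int main(void)
{
	int imm_old = BPF_CAST_CALL(dummy_helper) - __bpf_call_base;
	int imm_new = BPF_CALL_IMM(dummy_helper);

	/* Both forms yield the same byte offset from __bpf_call_base. */
	printf("old=%d new=%d\n", imm_old, imm_new);

	/* The address is recovered the way the test_bpf.c hunk checks it:
	 * call base plus immediate equals the helper address.
	 */
	printf("round trip ok: %d\n",
	       (long)__bpf_call_base + imm_new == (long)dummy_helper);
	return 0;
}

Since insn->imm is only 32 bits wide, the offset has to fit in s32, which
is exactly what the (long)__bpf_call_base + insn->imm != addr check in the
test_bpf.c hunk above guards against before emitting the call.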