On Fri, Sep 6, 2024 at 6:56 AM Daniel Borkmann <daniel@xxxxxxxxxxxxx> wrote:
>
> For all non-tracing helpers which formerly had ARG_PTR_TO_{LONG,INT} as input
> arguments, zero the value in the error case as otherwise it could leak
> memory. For tracing, this is not needed given CAP_PERFMON can already read
> all kernel memory anyway, hence bpf_get_func_arg() and bpf_get_func_ret()
> are skipped here.
>
> Also, rearrange the MTU checker helpers a bit to, among other nit fixes,
> consolidate flag checks such that we only need to zero in one location with
> regard to malformed flag inputs.
>
> Fixes: 8a67f2de9b1d ("bpf: expose bpf_strtol and bpf_strtoul to all program types")
> Fixes: d7a4cb9b6705 ("bpf: Introduce bpf_strtol and bpf_strtoul helpers")
> Signed-off-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
> ---
> v1 -> v2:
>   - only set *mtu_len in error path (Alexei)
>
>  kernel/bpf/helpers.c |  2 ++
>  kernel/bpf/syscall.c |  1 +
>  net/core/filter.c    | 35 +++++++++++++++++------------------
>  3 files changed, 20 insertions(+), 18 deletions(-)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 0587d0c2375a..ff66a0522799 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -523,6 +523,7 @@ BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
>  	long long _res;
>  	int err;
>
> +	*res = 0;
>  	err = __bpf_strtoll(buf, buf_len, flags, &_res);
>  	if (err < 0)
>  		return err;
> @@ -549,6 +550,7 @@ BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
>  	bool is_negative;
>  	int err;
>
> +	*res = 0;
>  	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
>  	if (err < 0)
>  		return err;
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index feb276771c03..513b4301a0af 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -5934,6 +5934,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
>
>  BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
>  {
> +	*res = 0;
>  	if (flags)
>  		return -EINVAL;
>
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 4be175f84eb9..c219385e7bb4 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -6264,18 +6264,19 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
>  	int skb_len, dev_len;
>  	int mtu;
>
> -	if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
> -		return -EINVAL;
> -
> -	if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
> +	if (unlikely((flags & ~(BPF_MTU_CHK_SEGS)) ||
> +		     (flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))) {
> +		*mtu_len = 0;
>  		return -EINVAL;
> +	}

meh, why? You have *mtu_len = 0 below anyway, so there is already
duplication. I'd rather have an extra *mtu_len = 0 than a much more
convoluted condition.

>
>  	dev = __dev_via_ifindex(dev, ifindex);
> -	if (unlikely(!dev))
> +	if (unlikely(!dev)) {
> +		*mtu_len = 0;
>  		return -ENODEV;
> +	}
>
>  	mtu = READ_ONCE(dev->mtu);
> -
>  	dev_len = mtu + dev->hard_header_len;
>
>  	/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */

[...]
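
For illustration, a minimal sketch of the leak class the patch closes,
using a hypothetical cgroup/sysctl program (not part of this series):
before the fix, a failed bpf_strtol() left the caller's stack slot
untouched, yet the verifier still treated the slot as initialized after
the call, so stale kernel stack bytes became readable by the program:

/* Hypothetical example, not from the patch: how a failed bpf_strtol()
 * could expose uninitialized kernel stack data before this fix.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int observe_stale_res(struct bpf_sysctl *ctx)
{
	long res;		/* uninitialized stack slot */
	char buf[] = "zzz";	/* not a number: bpf_strtol() fails with -EINVAL */

	/* Pre-fix, the error path did not write *res, but the verifier
	 * marked 'res' as initialized after the call, so the stale stack
	 * bytes below were observable by the program.
	 */
	bpf_strtol(buf, sizeof(buf), 0, &res);

	return res & 1;		/* leaks one bit of uninitialized memory */
}

char _license[] SEC("license") = "GPL";

With the patch applied, *res is zeroed up front, so the error path can
no longer expose prior stack contents to the program.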