On 2/5/24 4:10 AM, Philo Lu wrote:
Allow using the bpf_skb_load_bytes() helper with BPF_PROG_TYPE_TRACING,
which is useful for skb parsing in raw_tp/fentry/fexit, especially for
non-linear paged skb data.
Selftests will be added if this patch is acceptable.
Signed-off-by: Philo Lu <lulie@xxxxxxxxxxxxxxxxx>
---
kernel/trace/bpf_trace.c | 3 +++
net/core/filter.c | 13 +++++++++++++
2 files changed, 16 insertions(+)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 241ddf5e3895..4b928d929962 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1945,6 +1945,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
+extern const struct bpf_func_proto bpf_skb_load_bytes_trace_proto;
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
@@ -2048,6 +2049,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_ptr_cookie_proto;
case BPF_FUNC_xdp_get_buff_len:
return &bpf_xdp_get_buff_len_trace_proto;
+ case BPF_FUNC_skb_load_bytes:
+ return &bpf_skb_load_bytes_trace_proto;
It is not safe for all BPF_PROG_TYPE_TRACING hooks, e.g. fexit/__kfree_skb.
It is pretty much only safe for BPF_TRACE_RAW_TP (i.e. "tp_btf"). Take a look at
prog_args_trusted(). Instead of making the bpf_skb_load_bytes() helper available
to "tp_btf", I would suggest to 1) make bpf_dynptr_from_skb() kfunc available to
"tp_btf", 2) enforce KF_TRUSTED_ARGS and 3) ensure it is rdonly (take a look at
bpf_dynptr_from_skb_rdonly). Together with bpf_dynptr_slice() kfunc, it should
be equivalent to the bpf_skb_load_bytes().
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?
diff --git a/net/core/filter.c b/net/core/filter.c
index 9f806cfbc654..ec5622ae8770 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1764,6 +1764,19 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.arg4_type = ARG_CONST_SIZE,
};
+BTF_ID_LIST_SINGLE(bpf_skb_load_bytes_btf_ids, struct, sk_buff)
+
+const struct bpf_func_proto bpf_skb_load_bytes_trace_proto = {
+ .func = bpf_skb_load_bytes,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_skb_load_bytes_btf_ids[0],
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+};
+
int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
{
return ____bpf_skb_load_bytes(skb, offset, to, len);