On 1/20/25 5:28 PM, Jason Xing wrote:
In the next round, we will support the UDP proto for SO_TIMESTAMPING
bpf extension, so we need to ensure there is no safety problem, which
is usually caused by a UDP socket trying to access TCP fields.
These approaches can be categorized into two groups:
1. add TCP protocol check
2. add sock op check
Same as patch 3. The commit message needs adjustment. I would combine patch 3
and patch 4 because ...
Signed-off-by: Jason Xing <kerneljasonxing@xxxxxxxxx>
---
net/core/filter.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index fdd305b4cfbb..934431886876 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5523,6 +5523,11 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
return -EINVAL;
}
+static bool is_locked_tcp_sock_ops(struct bpf_sock_ops_kern *bpf_sock)
+{
+ return bpf_sock->op <= BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
More bike shedding...
After sleeping on it again, I think it can just test the
bpf_sock->allow_tcp_access instead.
+}
+
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
@@ -5673,7 +5678,12 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
- return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
+ struct sock *sk = bpf_sock->sk;
+
+ if (is_locked_tcp_sock_ops(bpf_sock) && sk_fullsock(sk))
afaict, the new timestamping callbacks still can do setsockopt and it is
incorrect. It should be:
if (!bpf_sock->allow_tcp_access)
return -EOPNOTSUPP;
I recall I asked this in v5, but it may be buried in the long thread, so I am asking
here again. Please add test(s) to check that the new timestamping callbacks
cannot call setsockopt and read/write to some of the tcp_sock fields through the
bpf_sock_ops.
+ sock_owned_by_me(sk);
Not needed and instead...
+
+ return __bpf_setsockopt(sk, level, optname, optval, optlen);
keep the original _bpf_setsockopt().
}
static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
@@ -5759,6 +5769,7 @@ BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
+ bpf_sock->sk->sk_protocol == IPPROTO_TCP &&
optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
There is no need to allow getsockopt regardless of which SOL_* it is asking for. To keep it
simple, I would just disable both getsockopt and setsockopt for all SOL_* for
the new timestamping callbacks. Nothing is lost, the bpf prog can directly read
the sk.
int ret, copy_len = 0;
const u8 *start;
@@ -5800,7 +5811,8 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
struct sock *sk = bpf_sock->sk;
int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
- if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
+ if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk) ||
+ sk->sk_protocol != IPPROTO_TCP)
Same here. It should disallow this "set" helper for the timestamping callbacks
which do not hold the lock.
return -EINVAL;
tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
@@ -7609,6 +7621,9 @@ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
u8 search_kind, search_len, copy_len, magic_len;
int ret;
+ if (!is_locked_tcp_sock_ops(bpf_sock))
+ return -EOPNOTSUPP;
This is correct, just change it to "!bpf_sock->allow_tcp_access".
All the above changed helpers should use the same test and the same return handling.
+
/* 2 byte is the minimal option len except TCPOPT_NOP and
* TCPOPT_EOL which are useless for the bpf prog to learn
* and this helper disallow loading them also.