Introduce tskey_bpf to correlate the tcp_sendmsg timestamp with the other three points (SND/SW/ACK). More details can be found in the selftest. For TCP, tskey_bpf is used to store the initial write_seq value the moment tcp_sendmsg is called, so that the last skb of this call will have the same tskey_bpf as the tcp_sendmsg bpf callback. UDP works similarly because tskey_bpf can increase by one every time udp_sendmsg gets called. It will be implemented soon. Signed-off-by: Jason Xing <kerneljasonxing@xxxxxxxxx> --- include/linux/skbuff.h | 2 ++ include/uapi/linux/bpf.h | 3 +++ net/core/sock.c | 3 ++- net/ipv4/tcp.c | 10 ++++++++-- tools/include/uapi/linux/bpf.h | 3 +++ 5 files changed, 18 insertions(+), 3 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d3ef8db94a94..3b7b470d5d89 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -609,6 +609,8 @@ struct skb_shared_info { }; unsigned int gso_type; u32 tskey; + /* For TCP, it records the initial write_seq when sendmsg is called */ + u32 tskey_bpf; /* * Warning : all fields before dataref are cleared in __alloc_skb() diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a0aff1b4eb61..87420c0f2235 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -7037,6 +7037,9 @@ enum { * feature is on. It indicates the * recorded timestamp. */ + BPF_SOCK_OPS_TS_TCP_SND_CB, /* Called when every tcp_sendmsg * syscall is triggered */ }; /* List of TCP states. 
There is a build check in net/ipv4/tcp.c to detect diff --git a/net/core/sock.c b/net/core/sock.c index 2f54e60a50d4..e74ab0e2979d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -958,7 +958,8 @@ void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) if (sk_is_tcp(sk) && sk_fullsock(sk)) sock_ops.is_fullsock = 1; sock_ops.sk = sk; - bpf_skops_init_skb(&sock_ops, skb, 0); + if (skb) + bpf_skops_init_skb(&sock_ops, skb, 0); sock_ops.timestamp_used = 1; __cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0a41006b10d1..b6e0db5e4ead 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -477,7 +477,7 @@ void tcp_init_sock(struct sock *sk) } EXPORT_SYMBOL(tcp_init_sock); -static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) +static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc, u32 first_write_seq) { struct sk_buff *skb = tcp_write_queue_tail(sk); u32 tsflags = sockc->tsflags; @@ -500,6 +500,7 @@ static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) tcb->txstamp_ack_bpf = 1; shinfo->tx_flags |= SKBTX_BPF; shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; + shinfo->tskey_bpf = first_write_seq; } } @@ -1067,10 +1068,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; int process_backlog = 0; + u32 first_write_seq = 0; int zc = 0; long timeo; flags = msg->msg_flags; + if (SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING)) { + first_write_seq = tp->write_seq; + bpf_skops_tx_timestamping(sk, NULL, BPF_SOCK_OPS_TS_TCP_SND_CB); + } if ((flags & MSG_ZEROCOPY) && size) { if (msg->msg_ubuf) { @@ -1331,7 +1337,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) out: if (copied) { - tcp_tx_timestamp(sk, &sockc); + tcp_tx_timestamp(sk, &sockc, first_write_seq); tcp_push(sk, flags, mss_now, tp->nonagle, 
size_goal); } out_nopush: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0fe7d663a244..3769e38e052d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -7030,6 +7030,9 @@ enum { * feature is on. It indicates the * recorded timestamp. */ + BPF_SOCK_OPS_TS_TCP_SND_CB, /* Called when every tcp_sendmsg + * syscall is triggered + */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect -- 2.43.5