From: Jason Xing <kernelxing@xxxxxxxxxxx> Introduce the cgroup_bpf_enabled(CGROUP_SOCK_OPS) check to avoid affecting applications that are not using the BPF timestamping feature. Signed-off-by: Jason Xing <kernelxing@xxxxxxxxxxx> --- net/core/skbuff.c | 6 ++++-- net/ipv4/tcp.c | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 182a44815630..7c59ef501c74 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5659,7 +5659,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, { if (unlikely(skb_tstamp_is_set(orig_skb, tstype, false))) skb_tstamp_tx_output(orig_skb, ack_skb, hwtstamps, sk, tstype); - if (unlikely(skb_tstamp_is_set(orig_skb, tstype, true))) + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && + unlikely(skb_tstamp_is_set(orig_skb, tstype, true))) __skb_tstamp_tx_bpf(sk, orig_skb, hwtstamps, tstype); } EXPORT_SYMBOL_GPL(__skb_tstamp_tx); @@ -5670,7 +5671,8 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, int tstype = SCM_TSTAMP_SND; skb_tstamp_tx_output(orig_skb, NULL, hwtstamps, orig_skb->sk, tstype); - if (unlikely(skb_tstamp_is_set(orig_skb, tstype, true))) + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && + unlikely(skb_tstamp_is_set(orig_skb, tstype, true))) __skb_tstamp_tx_bpf(orig_skb->sk, orig_skb, hwtstamps, tstype); } EXPORT_SYMBOL_GPL(skb_tstamp_tx); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0a41006b10d1..3df802410ebf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -493,7 +493,8 @@ static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; } - if (SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING) && skb) { + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && + SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING) && skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); -- 2.37.3