From: Jason Xing <kernelxing@xxxxxxxxxxx>

This is a preparation patch for the bpf print function added later in
this series. It only moves the original generation logic into one
function so that the bpf print support can be integrated easily.

No functional changes here.

Signed-off-by: Jason Xing <kernelxing@xxxxxxxxxxx>
---
 include/linux/skbuff.h |  4 ++--
 net/core/dev.c         |  3 +--
 net/core/skbuff.c      | 41 +++++++++++++++++++++++++++++++++++------
 3 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 58009fa66102..53c6913560e4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -39,6 +39,7 @@
 #include <net/net_debug.h>
 #include <net/dropreason-core.h>
 #include <net/netmem.h>
+#include <uapi/linux/errqueue.h>
 
 /**
  * DOC: skb checksums
@@ -4535,8 +4536,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 static inline void skb_tx_timestamp(struct sk_buff *skb)
 {
 	skb_clone_tx_timestamp(skb);
-	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
-		skb_tstamp_tx(skb, NULL);
+	__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND);
 }
 
 /**
diff --git a/net/core/dev.c b/net/core/dev.c
index 45a8c3dd4a64..5d584950564b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4350,8 +4350,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 	skb_reset_mac_header(skb);
 	skb_assert_len(skb);
 
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
-		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
+	__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
 
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6841e61a6bd0..74b840ffaf94 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5539,10 +5539,10 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
-		     const struct sk_buff *ack_skb,
-		     struct skb_shared_hwtstamps *hwtstamps,
-		     struct sock *sk, int tstype)
+static void skb_tstamp_tx_output(struct sk_buff *orig_skb,
+				 const struct sk_buff *ack_skb,
+				 struct skb_shared_hwtstamps *hwtstamps,
+				 struct sock *sk, int tstype)
 {
 	struct sk_buff *skb;
 	bool tsonly, opt_stats = false;
@@ -5594,13 +5594,42 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 
 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
 }
+
+static bool skb_tstamp_is_set(const struct sk_buff *skb, int tstype)
+{
+	switch (tstype) {
+	case SCM_TSTAMP_SCHED:
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
+			return true;
+		return false;
+	case SCM_TSTAMP_SND:
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP))
+			return true;
+		return false;
+	case SCM_TSTAMP_ACK:
+		if (TCP_SKB_CB(skb)->txstamp_ack)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
+void __skb_tstamp_tx(struct sk_buff *orig_skb,
+		     const struct sk_buff *ack_skb,
+		     struct skb_shared_hwtstamps *hwtstamps,
+		     struct sock *sk, int tstype)
+{
+	if (unlikely(skb_tstamp_is_set(orig_skb, tstype)))
+		skb_tstamp_tx_output(orig_skb, ack_skb, hwtstamps, sk, tstype);
+}
 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
 
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		   struct skb_shared_hwtstamps *hwtstamps)
 {
-	return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
-			       SCM_TSTAMP_SND);
+	return skb_tstamp_tx_output(orig_skb, NULL, hwtstamps, orig_skb->sk,
+				    SCM_TSTAMP_SND);
 }
 EXPORT_SYMBOL_GPL(skb_tstamp_tx);

--
2.37.3