As we are renaming mono_delivery_time to tstamp_type, it makes sense to
start assigning tstamp_type based on the enum defined as part of this
commit.

Earlier we used a bool argument to indicate whether the tstamp is mono
in skb_set_delivery_time(); now the function signature accepts the enum
to distinguish between mono and real time.

Link: https://lore.kernel.org/netdev/bc037db4-58bb-4861-ac31-a361a93841d3@xxxxxxxxx/
Signed-off-by: Abhishek Chauhan <quic_abchauha@xxxxxxxxxxx>
---
 include/linux/skbuff.h                     | 13 +++++++++----
 net/bridge/netfilter/nf_conntrack_bridge.c |  2 +-
 net/core/dev.c                             |  2 +-
 net/core/filter.c                          |  4 ++--
 net/ipv4/ip_output.c                       |  2 +-
 net/ipv4/tcp_output.c                      | 14 +++++++-------
 net/ipv6/ip6_output.c                      |  2 +-
 net/ipv6/tcp_ipv6.c                        |  2 +-
 net/sched/act_bpf.c                        |  2 +-
 net/sched/cls_bpf.c                        |  2 +-
 10 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8210d699d8e9..6160185f0fe0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -701,6 +701,11 @@ typedef unsigned int sk_buff_data_t;
 #else
 typedef unsigned char *sk_buff_data_t;
 #endif
+
+enum skb_tstamp_type {
+	SKB_TSTAMP_TYPE_RX_REAL = 0,	/* A RX (receive) time in real */
+	SKB_TSTAMP_TYPE_TX_MONO = 1,	/* A TX (delivery) time in mono */
+};
 
 /**
  * DOC: Basic sk_buff geometry
@@ -4257,7 +4262,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
 static inline void __net_timestamp(struct sk_buff *skb)
 {
 	skb->tstamp = ktime_get_real();
-	skb->tstamp_type = 0;
+	skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 }
 
 static inline ktime_t net_timedelta(ktime_t t)
@@ -4266,10 +4271,10 @@ static inline ktime_t net_timedelta(ktime_t t)
 }
 
 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
-					 bool mono)
+					 enum skb_tstamp_type tstamp_type)
 {
 	skb->tstamp = kt;
-	skb->tstamp_type = kt && mono;
+	skb->tstamp_type = kt && tstamp_type;
 }
 
 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4280,7 +4285,7 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
 static inline void skb_clear_delivery_time(struct sk_buff *skb)
 {
 	if (skb->tstamp_type) {
-		skb->tstamp_type = 0;
+		skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 		if (static_branch_unlikely(&netstamp_needed_key))
 			skb->tstamp = ktime_get_real();
 		else
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 989435bd1690..b970ab2279cf 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 					   struct sk_buff *))
 {
 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-	bool tstamp_type = skb->tstamp_type;
+	u8 tstamp_type = skb->tstamp_type;
 	unsigned int hlen, ll_rs, mtu;
 	ktime_t tstamp = skb->tstamp;
 	struct ip_frag_state state;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8b88f8118052..9a84156fab3c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL(net_disable_timestamp);
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp = 0;
-	skb->tstamp_type = 0;
+	skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 	if (static_branch_unlikely(&netstamp_needed_key))
 		skb->tstamp = ktime_get_real();
 }
diff --git a/net/core/filter.c b/net/core/filter.c
index 0f535defdd2c..1c943a165c30 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7698,13 +7698,13 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
 		if (!tstamp)
 			return -EINVAL;
 		skb->tstamp = tstamp;
-		skb->tstamp_type = 1;
+		skb->tstamp_type = SKB_TSTAMP_TYPE_TX_MONO;
 		break;
 	case BPF_SKB_TSTAMP_UNSPEC:
 		if (tstamp)
 			return -EINVAL;
 		skb->tstamp = 0;
-		skb->tstamp_type = 0;
+		skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 		break;
 	default:
 		return -EINVAL;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e8ec7e8ae2e0..62e457f7c02c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 {
 	struct iphdr *iph;
 	struct sk_buff *skb2;
-	bool tstamp_type = skb->tstamp_type;
+	u8 tstamp_type = skb->tstamp_type;
 	struct rtable *rt = skb_rtable(skb);
 	unsigned int mtu, hlen, ll_rs;
 	struct ip_fraglist_iter iter;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e3167ad96567..071fe377747a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1297,7 +1297,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	tp = tcp_sk(sk);
 	prior_wstamp = tp->tcp_wstamp_ns;
 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
-	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_TSTAMP_TYPE_TX_MONO);
 	if (clone_it) {
 		oskb = skb;
 
@@ -1647,7 +1647,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
 	skb_split(skb, buff, len);
 
-	skb_set_delivery_time(buff, skb->tstamp, true);
+	skb_set_delivery_time(buff, skb->tstamp, SKB_TSTAMP_TYPE_TX_MONO);
 	tcp_fragment_tstamp(skb, buff);
 
 	old_factor = tcp_skb_pcount(skb);
@@ -2728,7 +2728,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
-			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_TSTAMP_TYPE_TX_MONO);
 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 			tcp_init_tso_segs(skb, mss_now);
 			goto repair; /* Skip network transmission */
@@ -3711,11 +3711,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
-				      true);
+				      SKB_TSTAMP_TYPE_TX_MONO);
 	else
 #endif
 	{
-		skb_set_delivery_time(skb, now, true);
+		skb_set_delivery_time(skb, now, SKB_TSTAMP_TYPE_TX_MONO);
 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
 	}
@@ -3802,7 +3802,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
 				synack_type, &opts);
 
-	skb_set_delivery_time(skb, now, true);
+	skb_set_delivery_time(skb, now, SKB_TSTAMP_TYPE_TX_MONO);
 	tcp_add_tx_delay(skb, tp);
 
 	return skb;
@@ -3986,7 +3986,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 
 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
 
-	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
+	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_TSTAMP_TYPE_TX_MONO);
 
 	/* Now full SYN+DATA was cloned and sent (or not),
 	 * remove the SYN from the original skb (syn_data)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 61ddc9549160..a9e819115622 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -859,7 +859,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 				inet6_sk(skb->sk) : NULL;
-	bool tstamp_type = skb->tstamp_type;
+	u8 tstamp_type = skb->tstamp_type;
 	struct ip6_frag_state state;
 	unsigned int mtu, hlen, nexthdr_offset;
 	ktime_t tstamp = skb->tstamp;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3f4cba49e9ee..a9bf9c630582 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -973,7 +973,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 			mark = inet_twsk(sk)->tw_mark;
 		else
 			mark = READ_ONCE(sk->sk_mark);
-		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
+		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_TSTAMP_TYPE_TX_MONO);
 	}
 	if (txhash) {
 		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d62edd36b455..6f64e867a5e9 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -55,7 +55,7 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
 		filter_res = bpf_prog_run(filter, skb);
 	}
 	if (unlikely(!skb->tstamp && skb->tstamp_type))
-		skb->tstamp_type = 0;
+		skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 
 	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
 		skb_orphan(skb);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index f9cb4378c754..7ee73618c438 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -105,7 +105,7 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
 			filter_res = bpf_prog_run(prog->filter, skb);
 		}
 		if (unlikely(!skb->tstamp && skb->tstamp_type))
-			skb->tstamp_type = 0;
+			skb->tstamp_type = SKB_TSTAMP_TYPE_RX_REAL;
 
 		if (prog->exts_integrated) {
 			res->class = 0;
-- 
2.25.1
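
(Not part of the patch above -- an illustrative aside.) The snippet below is a
minimal, userspace-compilable sketch of how a call site changes with the new
enum-based skb_set_delivery_time() signature. The ktime_t and struct sk_buff
here are simplified stand-ins, not the kernel definitions; only the argument
change at the call site is the point.

#include <stdio.h>

typedef long long ktime_t;		/* stand-in for the kernel's ktime_t */

enum skb_tstamp_type {
	SKB_TSTAMP_TYPE_RX_REAL = 0,	/* an RX (receive) time in real clock base */
	SKB_TSTAMP_TYPE_TX_MONO = 1,	/* a TX (delivery) time in mono clock base */
};

struct sk_buff {			/* stand-in: only the fields used here */
	ktime_t tstamp;
	unsigned int tstamp_type:1;
};

static void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
				  enum skb_tstamp_type tstamp_type)
{
	skb->tstamp = kt;
	skb->tstamp_type = kt && tstamp_type;	/* mirrors the patched helper */
}

int main(void)
{
	struct sk_buff skb = { 0 };

	/* old call site: skb_set_delivery_time(&skb, now, true);    */
	/* new call site: pass the enum value instead of a bare bool */
	skb_set_delivery_time(&skb, 123456789LL, SKB_TSTAMP_TYPE_TX_MONO);
	printf("tstamp_type = %d\n", skb.tstamp_type);	/* prints 1 */

	return 0;
}

With the enum in place, additional timestamp types can presumably be added
later without another round of bool-to-enum churn at every caller.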