Jordan Rife wrote:
> Replace the offset-based approach for tracking progress through a bucket
> in the UDP table with one based on unique, monotonically increasing
> index numbers associated with each socket in a bucket.
>
> Signed-off-by: Jordan Rife <jrife@xxxxxxxxxx>
> ---
>  include/net/sock.h |  2 ++
>  include/net/udp.h  |  1 +
>  net/ipv4/udp.c     | 38 +++++++++++++++++++++++++-------------
>  3 files changed, 28 insertions(+), 13 deletions(-)
>
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 8036b3b79cd8..b11f43e8e7ec 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -228,6 +228,7 @@ struct sock_common {
>  		u32		skc_window_clamp;
>  		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
>  	};
> +	__s64			skc_idx;
>  	/* public: */
>  };
>
> @@ -378,6 +379,7 @@ struct sock {
>  #define sk_incoming_cpu	__sk_common.skc_incoming_cpu
>  #define sk_flags		__sk_common.skc_flags
>  #define sk_rxhash		__sk_common.skc_rxhash
> +#define sk_idx			__sk_common.skc_idx
>
>  	__cacheline_group_begin(sock_write_rx);
>
> diff --git a/include/net/udp.h b/include/net/udp.h
> index 6e89520e100d..9398561addc6 100644
> --- a/include/net/udp.h
> +++ b/include/net/udp.h
> @@ -102,6 +102,7 @@ struct udp_table {
>  #endif
>  	unsigned int		mask;
>  	unsigned int		log;
> +	atomic64_t		ver;
>  };
>  extern struct udp_table udp_table;
>  void udp_table_init(struct udp_table *, const char *);
>
> diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
> index a9bb9ce5438e..d7e9b3346983 100644
> --- a/net/ipv4/udp.c
> +++ b/net/ipv4/udp.c
> @@ -229,6 +229,11 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
>  	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
>  }
>
> +static inline __s64 udp_table_next_idx(struct udp_table *udptable, bool pos)
> +{
> +	return (pos ? 1 : -1) * atomic64_inc_return(&udptable->ver);
> +}

Can this BPF feature be fixed without adding extra complexity and cost
to the normal protocol paths?
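
To spell out what the per-socket index buys the iterator, here is a
minimal userspace sketch. This is not code from the patch: struct
fake_sk, bucket, and resume_after are hypothetical stand-ins, and the
signed/pos handling and the real hlist traversal are ignored. The point
is only that resuming after a remembered index stays unambiguous under
concurrent removals, whereas a positional offset shifts whenever an
earlier entry leaves the bucket, repeating or skipping sockets:

/*
 * Userspace illustration of resume-by-index (not kernel code).
 * Each "socket" carries a unique, monotonically increasing index,
 * analogous to sk->sk_idx assigned from udptable->ver in the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sk {
	int64_t idx;           /* analogous to sk->sk_idx */
	struct fake_sk *next;
};

static int64_t ver;            /* analogous to udptable->ver */
static struct fake_sk *bucket; /* one hash bucket, newest first */

static void add_sk(void)
{
	struct fake_sk *sk = malloc(sizeof(*sk));

	sk->idx = ++ver;       /* unique, monotonically increasing */
	sk->next = bucket;
	bucket = sk;
}

static void remove_sk(int64_t idx)
{
	for (struct fake_sk **p = &bucket; *p; p = &(*p)->next) {
		if ((*p)->idx == idx) {
			struct fake_sk *dead = *p;

			*p = dead->next;
			free(dead);
			return;
		}
	}
}

/* Resume: smallest index strictly greater than the last one visited. */
static struct fake_sk *resume_after(int64_t last_idx)
{
	struct fake_sk *best = NULL;

	for (struct fake_sk *sk = bucket; sk; sk = sk->next)
		if (sk->idx > last_idx && (!best || sk->idx < best->idx))
			best = sk;
	return best;
}

int main(void)
{
	add_sk(); add_sk(); add_sk();            /* indices 1, 2, 3 */

	int64_t last = 0;
	struct fake_sk *sk = resume_after(last); /* visits idx 1 */

	last = sk->idx;
	remove_sk(2);                            /* "concurrent" removal */
	sk = resume_after(last);                 /* idx 3: no repeat, no skip */
	printf("resumed at idx %lld\n", (long long)sk->idx);
	return 0;
}

In the kernel patch the iterator presumably just remembers the last
sk_idx it emitted and skips bucket entries up to that value on resume;
the sketch's O(n) minimum search is only there to keep the example
self-contained.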