On 09/02/2022 15:11, D. Wythe wrote:
> +static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
> +					   struct request_sock *req,
> +					   struct dst_entry *dst,
> +					   struct request_sock *req_unhash,
> +					   bool *own_req)
> +{
> +	struct smc_sock *smc;
> +
> +	smc = (struct smc_sock *)((uintptr_t)sk->sk_user_data & ~SK_USER_DATA_NOCOPY);

Did you run checkpatch.pl on these patches? To me, this and other
lines look longer than 80 characters.

> +
> +	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->smc_pendings) >
> +			sk->sk_max_ack_backlog)
> +		goto drop;
> +
> +	if (sk_acceptq_is_full(&smc->sk)) {
> +		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
> +		goto drop;
> +	}
> +
> +	/* passthrough to origin syn recv sock fct */
> +	return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash, own_req);
> +
> +drop:
> +	dst_release(dst);
> +	tcp_listendrop(sk);
> +	return NULL;
> +}
> +
>  static struct smc_hashinfo smc_v4_hashinfo = {
>  	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
>  };
> @@ -1595,6 +1623,9 @@ static void smc_listen_out(struct smc_sock *new_smc)
>  	struct smc_sock *lsmc = new_smc->listen_smc;
>  	struct sock *newsmcsk = &new_smc->sk;
>
> +	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
> +		atomic_dec(&lsmc->smc_pendings);
> +
>  	if (lsmc->sk.sk_state == SMC_LISTEN) {
>  		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
>  		smc_accept_enqueue(&lsmc->sk, newsmcsk);
> @@ -2200,6 +2231,9 @@ static void smc_tcp_listen_work(struct work_struct *work)
>  		if (!new_smc)
>  			continue;
>
> +		if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
> +			atomic_inc(&lsmc->smc_pendings);
> +
>  		new_smc->listen_smc = lsmc;
>  		new_smc->use_fallback = lsmc->use_fallback;
>  		new_smc->fallback_rsn = lsmc->fallback_rsn;
> @@ -2266,6 +2300,15 @@ static int smc_listen(struct socket *sock, int backlog)
>  	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
>  	smc->clcsock->sk->sk_user_data =
>  		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
> +
> +	/* save origin ops */
> +	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
> +
> +	smc->af_ops = *smc->ori_af_ops;
> +	smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
> +
> +	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
> +
>  	rc = kernel_listen(smc->clcsock, backlog);
>  	if (rc) {
>  		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
> diff --git a/net/smc/smc.h b/net/smc/smc.h
> index 37b2001..5e5e38d 100644
> --- a/net/smc/smc.h
> +++ b/net/smc/smc.h
> @@ -252,6 +252,10 @@ struct smc_sock {	/* smc sock container */
>  	bool		use_fallback;	/* fallback to tcp */
>  	int		fallback_rsn;	/* reason for fallback */
>  	u32		peer_diagnosis;	/* decline reason from peer */
> +	atomic_t	smc_pendings;	/* pending smc connections */

I don't like the name smc_pendings, it's not very specific. What about
queued_smc_hs? And for the comment: queued smc handshakes

> +	struct inet_connection_sock_af_ops		af_ops;
> +	const struct inet_connection_sock_af_ops	*ori_af_ops;
> +					/* origin af ops */

origin -> original

>  	int sockopt_defer_accept;
>  					/* sockopt TCP_DEFER_ACCEPT
>  					 * value

--
Karsten
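
For illustration, the override pattern the patch relies on (save the
socket's original icsk_af_ops, copy the table, swap one callback, and
point the socket at the private copy) can be sketched in self-contained
userspace C. Every name below (struct af_ops, smc_like_sock, queued_hs,
and so on) is a simplified stand-in invented for this sketch, not the
actual kernel or SMC API:

	#include <stdio.h>

	/* Stand-in for inet_connection_sock_af_ops: a table of callbacks. */
	struct af_ops {
		int (*syn_recv_sock)(void *sk);
	};

	static int tcp_syn_recv_sock(void *sk)
	{
		(void)sk;
		puts("original syn_recv_sock ran");
		return 0;
	}

	static const struct af_ops tcp_af_ops = {
		.syn_recv_sock = tcp_syn_recv_sock,
	};

	/* Stand-in for struct smc_sock: a writable ops copy plus the
	 * saved pointer to the original table. */
	struct smc_like_sock {
		struct af_ops af_ops;             /* private copy, one hook swapped */
		const struct af_ops *ori_af_ops;  /* saved original table */
		const struct af_ops *icsk_af_ops; /* table the "socket" dispatches on */
		int queued_hs;                    /* queued handshakes */
		int max_backlog;
	};

	/* Override: enforce the limit, then pass through to the original
	 * hook, mirroring the drop/passthrough split in the patch's
	 * smc_tcp_syn_recv_sock(). */
	static int limited_syn_recv_sock(void *sk)
	{
		struct smc_like_sock *smc = sk; /* kernel recovers this via sk_user_data */

		if (smc->queued_hs >= smc->max_backlog)
			return -1; /* the "goto drop" path */
		return smc->ori_af_ops->syn_recv_sock(sk);
	}

	int main(void)
	{
		struct smc_like_sock s = {
			.icsk_af_ops = &tcp_af_ops,
			.max_backlog = 1,
		};

		/* Mirror of the smc_listen() hunk: save, copy, swap one hook. */
		s.ori_af_ops = s.icsk_af_ops;
		s.af_ops = *s.ori_af_ops;
		s.af_ops.syn_recv_sock = limited_syn_recv_sock;
		s.icsk_af_ops = &s.af_ops;

		printf("first connection: %d\n", s.icsk_af_ops->syn_recv_sock(&s));
		s.queued_hs = 1; /* one handshake now queued */
		printf("second connection: %d\n", s.icsk_af_ops->syn_recv_sock(&s));
		return 0;
	}

The property the patch depends on is visible here: the private copy
leaves every other callback untouched, so all non-overridden paths
still reach the original TCP handlers, and the saved ori_af_ops pointer
lets the override pass accepted connections straight through.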