When sockets are destroyed in the BPF iterator context, the sock lock is already acquired, so skip taking the ilb2 bucket lock in inet_unhash() in that context. This allows TCP listening sockets to be destroyed from BPF programs. Signed-off-by: Aditi Ghag <aditi.ghag@xxxxxxxxxxxxx> --- net/ipv4/inet_hashtables.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e41fdc38ce19..5543a3e0d1b4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -777,9 +777,11 @@ void inet_unhash(struct sock *sk) /* Don't disable bottom halves while acquiring the lock to * avoid circular locking dependency on PREEMPT_RT. */ - spin_lock(&ilb2->lock); + if (!has_current_bpf_ctx()) + spin_lock(&ilb2->lock); if (sk_unhashed(sk)) { - spin_unlock(&ilb2->lock); + if (!has_current_bpf_ctx()) + spin_unlock(&ilb2->lock); return; } @@ -788,7 +790,8 @@ void inet_unhash(struct sock *sk) __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - spin_unlock(&ilb2->lock); + if (!has_current_bpf_ctx()) + spin_unlock(&ilb2->lock); } else { spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); -- 2.34.1