This is a note to let you know that I've just added the patch titled

    net/tls: split tls_rx_reader_lock

to the 6.5-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:

    net-tls-split-tls_rx_reader_lock.patch

and it can be found in the queue-6.5 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit c0578fe3ab6f4656528ea0923aec4e7d36a47c15
Author: Hannes Reinecke <hare@xxxxxxx>
Date:   Wed Jul 26 21:15:55 2023 +0200

    net/tls: split tls_rx_reader_lock

    [ Upstream commit f9ae3204fb45d0749befc1cdff50f691c7461e5a ]

    Split tls_rx_reader_{lock,unlock} into an 'acquire/release' part and
    the actual socket locking part. With that, tls_rx_reader_acquire() can
    be used in situations where the socket is already locked.

    Suggested-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
    Signed-off-by: Hannes Reinecke <hare@xxxxxxx>
    Reviewed-by: Jakub Kicinski <kuba@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20230726191556.41714-6-hare@xxxxxxx
    Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
    Stable-dep-of: 419ce133ab92 ("tcp: allow again tcp_disconnect() when threads are waiting")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index e047abc600893..a83f474933033 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1845,13 +1845,10 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
 	return sk_flush_backlog(sk);
 }
 
-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
-			      bool nonblock)
+static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
+				 bool nonblock)
 {
 	long timeo;
-	int err;
-
-	lock_sock(sk);
 
 	timeo = sock_rcvtimeo(sk, nonblock);
 
@@ -1865,26 +1862,30 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
 			      !READ_ONCE(ctx->reader_present), &wait);
 		remove_wait_queue(&ctx->wq, &wait);
 
-		if (timeo <= 0) {
-			err = -EAGAIN;
-			goto err_unlock;
-		}
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			goto err_unlock;
-		}
+		if (timeo <= 0)
+			return -EAGAIN;
+		if (signal_pending(current))
+			return sock_intr_errno(timeo);
 	}
 
 	WRITE_ONCE(ctx->reader_present, 1);
 
 	return 0;
+}
 
-err_unlock:
-	release_sock(sk);
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+			      bool nonblock)
+{
+	int err;
+
+	lock_sock(sk);
+	err = tls_rx_reader_acquire(sk, ctx, nonblock);
+	if (err)
+		release_sock(sk);
 	return err;
 }
 
-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
 {
 	if (unlikely(ctx->reader_contended)) {
 		if (wq_has_sleeper(&ctx->wq))
@@ -1896,6 +1897,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
 	}
 
 	WRITE_ONCE(ctx->reader_present, 0);
+}
+
+static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+	tls_rx_reader_release(sk, ctx);
 	release_sock(sk);
 }
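
As context for reviewers: the point of the split is that
tls_rx_reader_acquire()/tls_rx_reader_release() only claim and drop the
RX reader slot, while tls_rx_reader_lock()/tls_rx_reader_unlock() keep
the old behaviour of also taking and dropping the socket lock. A minimal
sketch of how a caller inside net/tls/tls_sw.c that already holds the
socket lock could use the new helpers (do_locked_rx_work() is a made-up
name for illustration, not part of this patch):

/* Hypothetical caller, for illustration only: it holds the socket lock
 * already, so it must use tls_rx_reader_acquire() rather than
 * tls_rx_reader_lock(), which would call lock_sock() a second time.
 */
static int do_locked_rx_work(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	int err;

	lock_sock(sk);	/* socket lock taken up front by this caller */

	/* Claim only the RX reader slot; the socket lock is already held. */
	err = tls_rx_reader_acquire(sk, ctx, true /* nonblock */);
	if (err)
		goto out;

	/* ... receive-side work that needs exclusive reader access ... */

	/* Drop the reader slot but keep holding the socket lock. */
	tls_rx_reader_release(sk, ctx);
out:
	release_sock(sk);
	return err;
}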