In the quite unlikely scenario where __tcp_alloc_md5sig_pool() succeeds
in crypto_alloc_ahash() but later fails to allocate a per-cpu request or
scratch area, the ahash will be leaked. In theory this can happen
repeatedly under OOM conditions, once for every
setsockopt(TCP_MD5SIG{,_EXT}) call.

Add a clean-up path to free the ahash. With that path in place, a failed
attempt no longer leaves partially allocated per-cpu requests behind, so
the md5_req check at the top of the loop becomes redundant and is
dropped.

Signed-off-by: Dmitry Safonov <dima@xxxxxxxxxx>
---
 net/ipv4/tcp.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c0856a6af9f5..eb478028b1ea 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4276,15 +4276,13 @@ static void __tcp_alloc_md5sig_pool(void)
 					       GFP_KERNEL,
 					       cpu_to_node(cpu));
 			if (!scratch)
-				return;
+				goto out_free;
 			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
 		}
-		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
-			continue;
 
 		req = ahash_request_alloc(hash, GFP_KERNEL);
 		if (!req)
-			return;
+			goto out_free;
 
 		ahash_request_set_callback(req, 0, NULL, NULL);
 
@@ -4295,6 +4293,16 @@ static void __tcp_alloc_md5sig_pool(void)
 	 */
 	smp_wmb();
 	tcp_md5sig_pool_populated = true;
+	return;
+
+out_free:
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(tcp_md5sig_pool, cpu).md5_req == NULL)
+			break;
+		ahash_request_free(per_cpu(tcp_md5sig_pool, cpu).md5_req);
+		per_cpu(tcp_md5sig_pool, cpu).md5_req = NULL;
+	}
+	crypto_free_ahash(hash);
 }
 
 bool tcp_alloc_md5sig_pool(void)
-- 
2.33.1
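
For readers unfamiliar with the pattern, here is a minimal userspace
sketch of the unwind the patch adds. It is not the kernel code: the
names (pool_init, struct slot, NR_SLOTS) and the malloc() sizes are
made up purely for illustration. The idea is the same, though: a shared
handle is allocated first, then per-slot resources, and if any per-slot
allocation fails, everything allocated so far -- including the shared
handle -- is released via a single goto label.

/* Hypothetical sketch of the goto-based unwind; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 4

struct slot {
	void *scratch;	/* stands in for the per-cpu scratch area */
	void *req;	/* stands in for the per-cpu ahash request */
};

static struct slot slots[NR_SLOTS];

static int pool_init(void)
{
	void *hash;	/* stands in for the crypto_ahash handle */
	int i;

	hash = malloc(64);
	if (!hash)
		return -1;

	for (i = 0; i < NR_SLOTS; i++) {
		if (!slots[i].scratch) {
			slots[i].scratch = malloc(128);
			if (!slots[i].scratch)
				goto out_free;
		}

		slots[i].req = malloc(32);
		if (!slots[i].req)
			goto out_free;
	}
	printf("pool populated\n");
	return 0;

out_free:
	/* Unwind: free every request allocated so far, then the shared
	 * handle.  Scratch areas are intentionally kept, mirroring the
	 * patch, which only frees the requests and the ahash on failure.
	 */
	for (i = 0; i < NR_SLOTS; i++) {
		if (!slots[i].req)
			break;
		free(slots[i].req);
		slots[i].req = NULL;
	}
	free(hash);
	return -1;
}

int main(void)
{
	return pool_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

As in the patch itself, the scratch areas are left allocated on
failure; the existing "if (!scratch)" check means they are simply
reused on a later attempt, so only the requests and the hash handle
need to be unwound.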