This is a note to let you know that I've just added the patch titled

    mptcp: annotate data-races around msk->rmem_fwd_alloc

to the 6.5-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mptcp-annotate-data-races-around-msk-rmem_fwd_alloc.patch
and it can be found in the queue-6.5 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 4b4296591bbb909409efad05306c1e7e54d606ec
Author: Eric Dumazet <edumazet@xxxxxxxxxx>
Date:   Thu Aug 31 13:52:10 2023 +0000

    mptcp: annotate data-races around msk->rmem_fwd_alloc

    [ Upstream commit 9531e4a83febc3fb47ac77e24cfb5ea97e50034d ]

    msk->rmem_fwd_alloc can be read locklessly.

    Add mptcp_rmem_fwd_alloc_add(), similar to sk_forward_alloc_add(),
    and appropriate READ_ONCE()/WRITE_ONCE() annotations.

    Fixes: 6511882cdd82 ("mptcp: allocate fwd memory separately on the rx and tx path")
    Signed-off-by: Eric Dumazet <edumazet@xxxxxxxxxx>
    Cc: Paolo Abeni <pabeni@xxxxxxxxxx>
    Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 996e031dff78a..40258d9f8c799 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -136,9 +136,15 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
 	__kfree_skb(skb);
 }
 
+static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
+{
+	WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
+		   mptcp_sk(sk)->rmem_fwd_alloc + size);
+}
+
 static void mptcp_rmem_charge(struct sock *sk, int size)
 {
-	mptcp_sk(sk)->rmem_fwd_alloc -= size;
+	mptcp_rmem_fwd_alloc_add(sk, -size);
 }
 
 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
@@ -179,7 +185,7 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
 static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
 {
 	amount >>= PAGE_SHIFT;
-	mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
+	mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
 	__sk_mem_reduce_allocated(sk, amount);
 }
 
@@ -188,7 +194,7 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	int reclaimable;
 
-	msk->rmem_fwd_alloc += size;
+	mptcp_rmem_fwd_alloc_add(sk, size);
 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
 
 	/* see sk_mem_uncharge() for the rationale behind the following schema */
@@ -343,7 +349,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
 		return false;
 
-	msk->rmem_fwd_alloc += amount;
+	mptcp_rmem_fwd_alloc_add(sk, amount);
 	return true;
 }
 
@@ -3243,7 +3249,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
 	 * inet_sock_destruct() will dispose it
 	 */
 	sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
-	msk->rmem_fwd_alloc = 0;
+	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
 	mptcp_token_destroy(msk);
 	mptcp_pm_free_anno_list(msk);
 	mptcp_free_local_addr_list(msk);
@@ -3513,7 +3519,8 @@ static void mptcp_shutdown(struct sock *sk, int how)
 
 static int mptcp_forward_alloc_get(const struct sock *sk)
 {
-	return READ_ONCE(sk->sk_forward_alloc) + mptcp_sk(sk)->rmem_fwd_alloc;
+	return READ_ONCE(sk->sk_forward_alloc) +
+	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
 }
 
 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
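
For readers unfamiliar with the idiom, the standalone sketch below shows the
same annotation pattern the commit applies: a field updated under the owner's
lock but read locklessly gets WRITE_ONCE() on every store and READ_ONCE() on
the lockless load. It is not part of the patch; the names demo_sock,
demo_counter_add() and demo_counter_get() are hypothetical stand-ins for
msk->rmem_fwd_alloc and its helpers, and READ_ONCE()/WRITE_ONCE() are reduced
to their core volatile-access form so the example builds in userspace.

/* Illustrative sketch only, not kernel code. */
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct demo_sock {
	int counter;	/* stands in for msk->rmem_fwd_alloc */
};

/* Writer side: runs under the owner's lock, but the store is annotated so
 * concurrent lockless readers never see a torn or reordered value. */
static void demo_counter_add(struct demo_sock *s, int size)
{
	WRITE_ONCE(s->counter, s->counter + size);
}

/* Reader side: may run without the lock, so the load is annotated too. */
static int demo_counter_get(const struct demo_sock *s)
{
	return READ_ONCE(s->counter);
}

int main(void)
{
	struct demo_sock s = { .counter = 0 };

	demo_counter_add(&s, 4096);	/* charge  */
	demo_counter_add(&s, -1024);	/* uncharge */
	printf("counter = %d\n", demo_counter_get(&s));
	return 0;
}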