This is a note to let you know that I've just added the patch titled

    socket, bpf: fix possible use after free

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     socket-bpf-fix-possible-use-after-free.patch
and it can be found in the queue-4.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Tue Oct 10 16:09:22 CEST 2017
From: Eric Dumazet <edumazet@xxxxxxxxxx>
Date: Mon, 2 Oct 2017 12:20:51 -0700
Subject: socket, bpf: fix possible use after free

From: Eric Dumazet <edumazet@xxxxxxxxxx>


[ Upstream commit eefca20eb20c66b06cf5ed09b49b1a7caaa27b7b ]

Starting from linux-4.4, 3WHS no longer takes the listener lock.

Since this time, we might hit a use-after-free in sk_filter_charge(),
if the filter we got in the memcpy() of the listener content just
happened to be replaced by a thread changing listener BPF filter.

To fix this, we need to make sure the filter refcount is not already
zero before incrementing it again.

Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Eric Dumazet <edumazet@xxxxxxxxxx>
Acked-by: Alexei Starovoitov <ast@xxxxxxxxxx>
Acked-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Signed-off-by: David S.
Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 net/core/filter.c |   15 +++++++++++++--
 net/core/sock.c   |    5 ++++-
 2 files changed, 17 insertions(+), 3 deletions(-)

--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -937,20 +937,31 @@ void sk_filter_uncharge(struct sock *sk,
 /* try to charge the socket memory if there is space available
  * return true on success
  */
-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	u32 filter_size = bpf_prog_size(fp->prog->len);
 
 	/* same check as in sock_kmalloc() */
 	if (filter_size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
-		atomic_inc(&fp->refcnt);
 		atomic_add(filter_size, &sk->sk_omem_alloc);
 		return true;
 	}
 	return false;
 }
 
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+	if (!atomic_inc_not_zero(&fp->refcnt))
+		return false;
+
+	if (!__sk_filter_charge(sk, fp)) {
+		sk_filter_release(fp);
+		return false;
+	}
+	return true;
+}
+
 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
 	struct sock_filter *old_prog;
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1528,13 +1528,16 @@ struct sock *sk_clone_lock(const struct
 		sock_reset_flag(newsk, SOCK_DONE);
 		skb_queue_head_init(&newsk->sk_error_queue);
 
-		filter = rcu_dereference_protected(newsk->sk_filter, 1);
+		rcu_read_lock();
+		filter = rcu_dereference(sk->sk_filter);
 		if (filter != NULL)
 			/* though it's an empty new sock, the charging may fail
 			 * if sysctl_optmem_max was changed between creation of
 			 * original socket and cloning
 			 */
 			is_charged = sk_filter_charge(newsk, filter);
+		RCU_INIT_POINTER(newsk->sk_filter, filter);
+		rcu_read_unlock();
 
 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
 			/* We need to make sure that we don't uncharge the new


Patches currently in stable-queue which might be from edumazet@xxxxxxxxxx are
queue-4.9/packet-in-packet_do_bind-test-fanout-with-bind_lock-held.patch
queue-4.9/socket-bpf-fix-possible-use-after-free.patch
queue-4.9/packet-only-test-po-has_vnet_hdr-once-in-packet_snd.patch
queue-4.9/tcp-fastopen-fix-on-syn-data-transmit-failure.patch
queue-4.9/net-set-sk_prot_creator-when-cloning-sockets-to-the-right-proto.patch
queue-4.9/tcp-fix-data-delivery-rate.patch