On Mon, Aug 02, 2021 at 11:19 PM CEST, Jiang Wang wrote:

[...]

> diff --git a/net/core/sock_map.c b/net/core/sock_map.c
> index ae5fa4338..42f50ea7a 100644
> --- a/net/core/sock_map.c
> +++ b/net/core/sock_map.c
> @@ -517,9 +517,15 @@ static bool sk_is_tcp(const struct sock *sk)
>  	       sk->sk_protocol == IPPROTO_TCP;
>  }
>
> +static bool sk_is_unix_stream(const struct sock *sk)
> +{
> +	return sk->sk_type == SOCK_STREAM &&
> +	       sk->sk_protocol == PF_UNIX;
> +}
> +
>  static bool sock_map_redirect_allowed(const struct sock *sk)
>  {
> -	if (sk_is_tcp(sk))
> +	if (sk_is_tcp(sk) || sk_is_unix_stream(sk))
>  		return sk->sk_state != TCP_LISTEN;
>  	else
>  		return sk->sk_state == TCP_ESTABLISHED;

Let me provide some context. The reason why we check != TCP_LISTEN for TCP
sockets is that we want to allow redirecting to sockets that are about to
transition from TCP_SYN_RECV to TCP_ESTABLISHED, in addition to sockets
already in the TCP_ESTABLISHED state. That's because the
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB callback happens while the socket is
still in the TCP_SYN_RECV state. With a BPF sockops program, we can insert
such a socket into a sockmap. Hence, there is a short window of opportunity
when we could redirect to a socket in TCP_SYN_RECV.

UNIX sockets can only be in the TCP_{CLOSE,LISTEN,ESTABLISHED} states,
AFAIK. So it is sufficient to rely on the default == TCP_ESTABLISHED check.
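For illustration, the kind of sockops program I have in mind looks roughly
like this (untested sketch; "sock_map", the key scheme, and the program name
are placeholders, not anything from this series):

/* Untested sketch, for illustration only. Map and program names are
 * made up; attach as a regular BPF_CGROUP_SOCK_OPS program.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sockops")
int add_passive_established(struct bpf_sock_ops *skops)
{
	__u32 key = 0;

	switch (skops->op) {
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* The TCP socket may still be in TCP_SYN_RECV here,
		 * which is why sock_map_redirect_allowed() checks for
		 * != TCP_LISTEN rather than == TCP_ESTABLISHED.
		 */
		bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";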
> diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
> index 0ae3fc4c8..9c1711c67 100644
> --- a/net/unix/af_unix.c
> +++ b/net/unix/af_unix.c
> @@ -791,17 +791,35 @@ static void unix_close(struct sock *sk, long timeout)
>   */
>  }
>
> -struct proto unix_proto = {
> -	.name			= "UNIX",
> +static void unix_unhash(struct sock *sk)
> +{
> +	/* Nothing to do here, unix socket does not need a ->unhash().
> +	 * This is merely for sockmap.
> +	 */
> +}
> +
> +struct proto unix_dgram_proto = {
> +	.name			= "UNIX-DGRAM",
> +	.owner			= THIS_MODULE,
> +	.obj_size		= sizeof(struct unix_sock),
> +	.close			= unix_close,
> +#ifdef CONFIG_BPF_SYSCALL
> +	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
> +#endif
> +};
> +
> +struct proto unix_stream_proto = {
> +	.name			= "UNIX-STREAM",
>  	.owner			= THIS_MODULE,
>  	.obj_size		= sizeof(struct unix_sock),
>  	.close			= unix_close,
> +	.unhash			= unix_unhash,
>  #ifdef CONFIG_BPF_SYSCALL
> -	.psock_update_sk_prot	= unix_bpf_update_proto,
> +	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
>  #endif
>  };
>
> -static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
> +static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
>  {
>  	struct sock *sk = NULL;
>  	struct unix_sock *u;
> @@ -810,7 +828,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
>  	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
>  		goto out;
>
> -	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
> +	if (type == SOCK_STREAM)
> +		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
> +	else /*dgram and seqpacket */
> +		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
> +
>  	if (!sk)
>  		goto out;
>
> @@ -872,7 +894,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
>  		return -ESOCKTNOSUPPORT;
>  	}
>
> -	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
> +	return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;
>  }
>
>  static int unix_release(struct socket *sock)
> @@ -1286,7 +1308,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
>  	err = -ENOMEM;
>
>  	/* create new sock for complete connection */
> -	newsk = unix_create1(sock_net(sk), NULL, 0);
> +	newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
>  	if (newsk == NULL)
>  		goto out;
>
> @@ -2214,7 +2236,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si
>  	struct sock *sk = sock->sk;
>
>  #ifdef CONFIG_BPF_SYSCALL
> -	if (sk->sk_prot != &unix_proto)
> +	if (sk->sk_prot != &unix_dgram_proto)
>  		return sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
>  					    flags & ~MSG_DONTWAIT, NULL);
>  #endif

KCSAN might be unhappy about this access to sk->sk_prot not being annotated
with READ_ONCE. In unix_bpf we have the paired WRITE_ONCE(sk->sk_prot, ...)
[1]. A sketch of what I mean is at the bottom of this mail.

[...]

[1] https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#why-kernel-code-should-use-read_once-and-write_once-for-shared-memory-accesses
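FWIW, for the quoted unix_dgram_recvmsg() check, I mean something along
these lines (sketch only, untested):

#ifdef CONFIG_BPF_SYSCALL
	/* Paired with WRITE_ONCE(sk->sk_prot, ...) in unix_bpf */
	struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_dgram_proto)
		return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				     flags & ~MSG_DONTWAIT, NULL);
#endif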