We can take advantage of the fact that both callers of
sock_map_init_proto hold an RCU read lock and have already verified
that psock is valid.

Signed-off-by: Lorenz Bauer <lmb@xxxxxxxxxxxxxx>
Reviewed-by: Jakub Sitnicki <jakub@xxxxxxxxxxxxxx>
Acked-by: John Fastabend <john.fastabend@xxxxxxxxx>
---
 net/core/sock_map.c | 19 ++++-------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index cb240d87e068..edfdce17b951 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -141,28 +141,17 @@ static void sock_map_unref(struct sock *sk, void *link_raw)
 	}
 }
 
-static int sock_map_init_proto(struct sock *sk)
+static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
 {
-	struct sk_psock *psock;
 	struct proto *prot;
 
 	sock_owned_by_me(sk);
 
-	rcu_read_lock();
-	psock = sk_psock(sk);
-	if (unlikely(!psock)) {
-		rcu_read_unlock();
-		return -EINVAL;
-	}
-
 	prot = tcp_bpf_get_proto(sk, psock);
-	if (IS_ERR(prot)) {
-		rcu_read_unlock();
+	if (IS_ERR(prot))
 		return PTR_ERR(prot);
-	}
 
 	sk_psock_update_proto(sk, psock, prot);
-	rcu_read_unlock();
 	return 0;
 }
 
@@ -241,7 +230,7 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
 	if (msg_parser)
 		psock_set_prog(&psock->progs.msg_parser, msg_parser);
 
-	ret = sock_map_init_proto(sk);
+	ret = sock_map_init_proto(sk, psock);
 	if (ret < 0)
 		goto out_drop;
 
@@ -286,7 +275,7 @@ static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
 		return -ENOMEM;
 	}
 
-	ret = sock_map_init_proto(sk);
+	ret = sock_map_init_proto(sk, psock);
 	if (ret < 0)
 		sk_psock_put(sk, psock);
 	return ret;
-- 
2.20.1
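
[Editor's illustrative note, not part of the patch: the refactoring pattern here is
"hoist locking and validation into the callers, pass the validated pointer down".
Below is a minimal, hypothetical userspace C sketch of that pattern; the names
init_proto, link_sock and global_psock are invented, and a pthread rwlock stands
in only very loosely for the RCU read lock.]

#include <pthread.h>
#include <stdio.h>

struct psock { int proto; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct psock *global_psock;

/* After the refactor: the helper neither takes the lock nor re-checks the
 * pointer; its callers guarantee both and hand it a validated psock.
 */
static int init_proto(struct psock *psock)
{
	psock->proto = 42;	/* stand-in for sk_psock_update_proto() */
	return 0;
}

/* Caller: takes the read lock once, validates once, then passes psock down. */
static int link_sock(void)
{
	struct psock *psock;
	int ret = -1;

	pthread_rwlock_rdlock(&lock);
	psock = global_psock;
	if (psock)		/* validation happens here, not in init_proto() */
		ret = init_proto(psock);
	pthread_rwlock_unlock(&lock);
	return ret;
}

int main(void)
{
	struct psock p = { 0 };

	global_psock = &p;
	printf("link_sock() = %d, proto = %d\n", link_sock(), p.proto);
	return 0;
}

[This mirrors the patch: sock_map_init_proto loses its own rcu_read_lock() and
psock lookup because both call sites already perform them.]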