On Tue, 2023-10-17 at 16:54 +0300, Dan Carpenter wrote: > The patch 419ce133ab92: "tcp: allow again tcp_disconnect() when > threads are waiting" from Oct 11, 2023 (linux-next), leads to the > following Smatch static checker warning: > > net/ipv4/tcp_bpf.c:324 tcp_bpf_recvmsg_parser() warn: inconsistent returns '&sk->sk_lock.slock'. > net/ipv4/tcp_bpf.c:370 tcp_bpf_recvmsg() warn: inconsistent returns '&sk->sk_lock.slock'. > > net/ipv4/tcp_bpf.c > 218 static int tcp_bpf_recvmsg_parser(struct sock *sk, > 219 struct msghdr *msg, > 220 size_t len, > 221 int flags, > 222 int *addr_len) > 223 { > 224 struct tcp_sock *tcp = tcp_sk(sk); > 225 int peek = flags & MSG_PEEK; > 226 u32 seq = tcp->copied_seq; > 227 struct sk_psock *psock; > 228 int copied = 0; > 229 > 230 if (unlikely(flags & MSG_ERRQUEUE)) > 231 return inet_recv_error(sk, msg, len, addr_len); > 232 > 233 if (!len) > 234 return 0; > 235 > 236 psock = sk_psock_get(sk); > 237 if (unlikely(!psock)) > 238 return tcp_recvmsg(sk, msg, len, flags, addr_len); > 239 > 240 lock_sock(sk); > 241 > 242 /* We may have received data on the sk_receive_queue pre-accept and > 243 * then we can not use read_skb in this context because we haven't > 244 * assigned a sk_socket yet so have no link to the ops. The work-around > 245 * is to check the sk_receive_queue and in these cases read skbs off > 246 * queue again. The read_skb hook is not running at this point because > 247 * of lock_sock so we avoid having multiple runners in read_skb. > 248 */ > 249 if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { > 250 tcp_data_ready(sk); > 251 /* This handles the ENOMEM errors if we both receive data > 252 * pre accept and are already under memory pressure. At least > 253 * let user know to retry. 
> 254 */ > 255 if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { > 256 copied = -EAGAIN; > 257 goto out; > 258 } > 259 } > 260 > 261 msg_bytes_ready: > 262 copied = sk_msg_recvmsg(sk, psock, msg, len, flags); > 263 /* The typical case for EFAULT is the socket was gracefully > 264 * shutdown with a FIN pkt. So check here the other case is > 265 * some error on copy_page_to_iter which would be unexpected. > 266 * On fin return correct return code to zero. > 267 */ > 268 if (copied == -EFAULT) { > 269 bool is_fin = is_next_msg_fin(psock); > 270 > 271 if (is_fin) { > 272 copied = 0; > 273 seq++; > 274 goto out; > 275 } > 276 } > 277 seq += copied; > 278 if (!copied) { > 279 long timeo; > 280 int data; > 281 > 282 if (sock_flag(sk, SOCK_DONE)) > 283 goto out; > 284 > 285 if (sk->sk_err) { > 286 copied = sock_error(sk); > 287 goto out; > 288 } > 289 > 290 if (sk->sk_shutdown & RCV_SHUTDOWN) > 291 goto out; > 292 > 293 if (sk->sk_state == TCP_CLOSE) { > 294 copied = -ENOTCONN; > 295 goto out; > 296 } > 297 > 298 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); > 299 if (!timeo) { > 300 copied = -EAGAIN; > 301 goto out; > 302 } > 303 > 304 if (signal_pending(current)) { > 305 copied = sock_intr_errno(timeo); > 306 goto out; > 307 } > 308 > 309 data = tcp_msg_wait_data(sk, psock, timeo); > 310 if (data < 0) > 311 return data; > > Do we need to call release_sock(sk); before returning? It gets called > in tcp_msg_wait_data() but then it calls lock_sock() again so Smatch > and I think that cancels out. 
Indeed you are right; we need something like this: copied = data; goto release; } > > 312 if (data && !sk_psock_queue_empty(psock)) > 313 goto msg_bytes_ready; > 314 copied = -EAGAIN; > 315 } > 316 out: > 317 if (!peek) > 318 WRITE_ONCE(tcp->copied_seq, seq); > 319 tcp_rcv_space_adjust(sk); > 320 if (copied > 0) > 321 __tcp_cleanup_rbuf(sk, copied); release: > 322 release_sock(sk); > 323 sk_psock_put(sk, psock); > --> 324 return copied; > 325 } and something similar in tcp_bpf_recvmsg(). I can send a patch (or you can go ahead if you prefer). Thanks! Paolo