TX and RX workers can fail in many places. These failures trigger a call to exit_with_error() which exits the program immediately and can lead to memory leaks. Add return values to functions that can fail. Handle failures more smoothly through report_failure(). Signed-off-by: Bastien Curutchet (eBPF Foundation) <bastien.curutchet@xxxxxxxxxxx> --- tools/testing/selftests/bpf/xskxceiver.c | 89 +++++++++++++++++++++----------- 1 file changed, 60 insertions(+), 29 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 5b96f6860ff98de3c6160a1b94ae865a12121382..5b1b05c21a04e05673d01855567320452db1f9c5 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -235,24 +235,26 @@ static void umem_reset_alloc(struct xsk_umem_info *umem) umem->next_buffer = 0; } -static void enable_busy_poll(struct xsk_socket_info *xsk) +static int enable_busy_poll(struct xsk_socket_info *xsk) { int sock_opt; sock_opt = 1; if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, (void *)&sock_opt, sizeof(sock_opt)) < 0) - exit_with_error(errno); + return -errno; sock_opt = 20; if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt, sizeof(sock_opt)) < 0) - exit_with_error(errno); + return -errno; sock_opt = xsk->batch_size; if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, (void *)&sock_opt, sizeof(sock_opt)) < 0) - exit_with_error(errno); + return -errno; + + return 0; } static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, @@ -1627,7 +1629,7 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject) return TEST_PASS; } -static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, +static int xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, struct xsk_umem_info *umem, bool tx) { int i, ret; @@ -1644,24 +1646,34 @@ static void 
xsk_configure_socket(struct test_spec *test, struct ifobject *ifobje /* Retry if it fails as xsk_socket__create() is asynchronous */ if (ctr >= SOCK_RECONF_CTR) - exit_with_error(-ret); + return ret; usleep(USLEEP_MAX); } - if (ifobject->busy_poll) - enable_busy_poll(&ifobject->xsk_arr[i]); + if (ifobject->busy_poll) { + ret = enable_busy_poll(&ifobject->xsk_arr[i]); + if (ret) + return ret; + } } + + return 0; } -static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) +static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) { - xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); + int ret = xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); + + if (ret) + return ret; ifobject->xsk = &ifobject->xsk_arr[0]; ifobject->xskmap = test->ifobj_rx->xskmap; memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); ifobject->umem->base_addr = 0; + + return 0; } -static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, +static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, bool fill_up) { u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; @@ -1675,7 +1687,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); if (ret != buffers_to_fill) - exit_with_error(ENOSPC); + return -ENOSPC; while (filled < buffers_to_fill) { struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); @@ -1703,9 +1715,11 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream pkt_stream_reset(pkt_stream); umem_reset_alloc(umem); + + return 0; } -static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) +static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject) { u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; int mmap_flags 
= MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; @@ -1722,27 +1736,34 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (bufs == MAP_FAILED) - exit_with_error(errno); + return -errno; ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); if (ret) - exit_with_error(-ret); + return ret; - xsk_configure_socket(test, ifobject, ifobject->umem, false); + ret = xsk_configure_socket(test, ifobject, ifobject->umem, false); + if (ret) + return ret; ifobject->xsk = &ifobject->xsk_arr[0]; if (!ifobject->rx_on) - return; + return 0; - xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring); + ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, + ifobject->use_fill_ring); + if (ret) + return ret; for (i = 0; i < test->nb_sockets; i++) { ifobject->xsk = &ifobject->xsk_arr[i]; ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i); if (ret) - exit_with_error(errno); + return ret; } + + return 0; } static void *worker_testapp_validate_tx(void *arg) @@ -1752,10 +1773,17 @@ static void *worker_testapp_validate_tx(void *arg) int err; if (test->current_step == 1) { - if (!ifobject->shared_umem) - thread_common_ops(test, ifobject); - else - thread_common_ops_tx(test, ifobject); + if (!ifobject->shared_umem) { + if (thread_common_ops(test, ifobject)) { + report_failure(test); + pthread_exit(NULL); + } + } else { + if (thread_common_ops_tx(test, ifobject)) { + report_failure(test); + pthread_exit(NULL); + } + } } err = send_pkts(test, ifobject); @@ -1775,19 +1803,22 @@ static void *worker_testapp_validate_rx(void *arg) int err; if (test->current_step == 1) { - thread_common_ops(test, ifobject); + err = thread_common_ops(test, ifobject); } else { xsk_clear_xskmap(ifobject->xskmap); err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0); - if (err) { - print_msg("Error: Failed to update xskmap, error 
%s\n", - strerror(-err)); - exit_with_error(-err); - } + if (err) + print_msg("Error: Failed to update xskmap, error %s\n", strerror(-err)); } pthread_barrier_wait(&barr); + /* We leave only now in case of error to avoid getting stuck in the barrier */ + if (err) { + report_failure(test); + pthread_exit(NULL); + } + err = receive_pkts(test); if (!err && ifobject->validation_func) -- 2.48.1