A multithreaded test is added. Three threads repeatedly do: - batch update - batch lookup_and_delete - batch delete It is totally possible each batch element operation in kernel may find that the key, retrieved from bpf_map_get_next_key(), may fail lookup and/or delete as some other threads in parallel operate on the same map. The default mode for new batch APIs is to ignore -ENOENT errors in case of lookup and delete and move to the next element. The test would otherwise fail if the kernel treats -ENOENT as a real error and propagates it back to user space. Signed-off-by: Yonghong Song <yhs@xxxxxx> --- .../selftests/bpf/map_tests/map_batch_mt.c | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 tools/testing/selftests/bpf/map_tests/map_batch_mt.c diff --git a/tools/testing/selftests/bpf/map_tests/map_batch_mt.c b/tools/testing/selftests/bpf/map_tests/map_batch_mt.c new file mode 100644 index 000000000000..a0e2591d0079 --- /dev/null +++ b/tools/testing/selftests/bpf/map_tests/map_batch_mt.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <pthread.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include <test_maps.h> + +/* Create three threads. Each thread will iteratively do: + * . update constantly + * . lookup and delete constantly + * . delete constantly + * So lookup and delete operations + * may fail as the elements may be deleted by another + * thread. + * + * By default, we should not see a problem as + * -ENOENT for bpf_map_delete_elem() and bpf_map_lookup_elem() + * will be ignored. But with flag, BPF_F_ENFORCE_ENOENT + * we may see errors. 
+ */ + +static int map_fd; +static const __u32 max_entries = 10; +static volatile bool stop = false; + +static void do_batch_update() +{ + int i, err, keys[max_entries], values[max_entries]; + __u32 count; + + for (i = 0; i < max_entries; i++) { + keys[i] = i + 1; + values[i] = i + 2; + } + + while (!stop) { + count = max_entries; + err = bpf_map_update_batch(map_fd, keys, values, &count, 0, 0); + CHECK(err, "bpf_map_update_batch()", "error:%s\n", + strerror(errno)); + } +} + +static void do_batch_delete() +{ + __u32 count; + int err; + + while (!stop) { + count = 0; + err = bpf_map_delete_batch(map_fd, NULL, NULL, NULL, &count, + 0, 0); + CHECK(err, "bpf_map_delete_batch()", "error:%s\n", + strerror(errno)); + } +} + +static void do_batch_lookup_and_delete() +{ + int err, key, keys[max_entries], values[max_entries]; + __u32 count; + void *p_key; + + while (!stop) { + p_key = &key; + count = max_entries; + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &p_key, + keys, values, &count, + 0, 0); + CHECK(err, "bpf_map_lookup_and_delete_batch()", "error:%s\n", + strerror(errno)); + } +} + +static void *do_work(void *arg) +{ + int work_index = (int)(long)arg; + + if (work_index == 0) + do_batch_update(); + else if (work_index == 1) + do_batch_delete(); + else + do_batch_lookup_and_delete(); + + return NULL; +} + +void test_map_batch_mt(void) +{ + struct bpf_create_map_attr xattr = { + .name = "hash_map", + .map_type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(int), + .value_size = sizeof(int), + }; + const int nr_threads = 3; + pthread_t threads[nr_threads]; + int i, err; + + xattr.max_entries = max_entries; + map_fd = bpf_create_map_xattr(&xattr); + CHECK(map_fd == -1, + "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + + for (i = 0; i < nr_threads; i++) { + err = pthread_create(&threads[i], NULL, do_work, + (void *)(long)i); + CHECK(err, "pthread_create", "error: %s\n", strerror(errno)); + } + + sleep(1); + stop = true; + + for (i = 0; i < nr_threads; i++) 
+ pthread_join(threads[i], NULL); + + close(map_fd); + + printf("%s:PASS\n", __func__); +} -- 2.17.1