Add selftests for the bpf_cpumask_populate helper that sets a
bpf_cpumask to a bit pattern provided by a BPF program.

Signed-off-by: Emil Tsalapatis (Meta) <emil@xxxxxxxxxxxxxxx>
---
 .../selftests/bpf/progs/cpumask_failure.c    |  38 ++++++
 .../selftests/bpf/progs/cpumask_success.c    | 114 ++++++++++++++++++
 2 files changed, 152 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
index b40b52548ffb..8a2fd596c8a3 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_failure.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -222,3 +222,41 @@ int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flag
 
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+__failure __msg("type=scalar expected=fp")
+int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456;
+	u64 bits;
+	int ret;
+
+	ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits));
+	if (!ret)
+		err = 2;
+
+	return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("leads to invalid memory access")
+int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags)
+{
+	void *garbage = (void *)0x123456;
+	struct bpf_cpumask *local;
+	int ret;
+
+	local = create_cpumask();
+	if (!local) {
+		err = 1;
+		return 0;
+	}
+
+	ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8);
+	if (!ret)
+		err = 2;
+
+	bpf_cpumask_release(local);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 80ee469b0b60..5dc0fe9940dc 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -757,6 +757,7 @@ int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_fl
 
 	mask1 = bpf_cpumask_create();
 	mask2 = bpf_cpumask_create();
+
 	if (!mask1 || !mask2)
 		goto free_masks_return;
 
@@ -770,3 +771,116 @@ int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_fl
 	bpf_cpumask_release(mask2);
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *local;
+	u8 toofewbits;
+	int ret;
+
+	local = create_cpumask();
+	if (!local)
+		return 0;
+
+	/* The kfunc should prevent this operation */
+	ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits));
+	if (ret != -EACCES)
+		err = 2;
+
+	bpf_cpumask_release(local);
+
+	return 0;
+}
+
+/* Mask is guaranteed to be large enough for bpf_cpumask_t. */
+#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t))
+
+/* Add an extra word for the test_populate_reject_unaligned test. */
+u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1];
+extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak;
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *mask;
+	char *src;
+	int ret;
+
+	/* Skip if unaligned accesses are fine for this arch. */
+	if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+		return 0;
+
+	mask = bpf_cpumask_create();
+	if (!mask) {
+		err = 1;
+		return 0;
+	}
+
+	/* Misalign the source array by a byte. */
+	src = &((char *)bits)[1];
+
+	ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN);
+	if (ret != -EINVAL)
+		err = 2;
+
+	bpf_cpumask_release(mask);
+
+	return 0;
+}
+
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *mask;
+	bool bit;
+	int ret;
+	int i;
+
+	/* Set only odd bits. */
+	__builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN);
+
+	mask = bpf_cpumask_create();
+	if (!mask) {
+		err = 1;
+		return 0;
+	}
+
+	/* Pass the entire bits array, the kfunc will only copy the valid bits. */
+	ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN);
+	if (ret) {
+		err = 2;
+		goto out;
+	}
+
+	/*
+	 * Test is there to appease the verifier. We cannot directly
+	 * access NR_CPUS, the upper bound for nr_cpus, so we infer
+	 * it from the size of cpumask_t.
+	 */
+	if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) {
+		err = 3;
+		goto out;
+	}
+
+	bpf_for(i, 0, nr_cpus) {
+		/* Odd-numbered bits should be set, even ones unset. */
+		bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask);
+		if (bit == (i % 2 != 0))
+			continue;
+
+		err = 4;
+		break;
+	}
+
+out:
+	bpf_cpumask_release(mask);
+
+	return 0;
+}
+
+#undef CPUMASK_TEST_MASKLEN
-- 
2.47.1
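
For readers who want to see the new kfunc outside the test harness, below is a
minimal usage sketch. It is illustrative only: the prototype is inferred from
the tests above (the real declaration is expected to come from
progs/cpumask_common.h elsewhere in this series), the program name and pattern
buffer are made up for the example, and it assumes the same surroundings as the
selftest progs (vmlinux.h, bpf_tracing.h, cpumask_common.h, and the global
"err" variable the tests use for reporting).

/* Illustrative sketch, not part of the patch. Prototype inferred from the
 * tests above; the actual declaration should live in cpumask_common.h.
 */
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

/* Global buffer: one u64 per 64 CPUs, sized to cover the whole cpumask. */
u64 pattern[sizeof(cpumask_t) / 8];

SEC("tp_btf/task_newtask")
int BPF_PROG(populate_usage_sketch, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)
		return 0;

	/* Set the even-numbered CPUs in the first word, then copy the whole
	 * pattern into the cpumask. Per the tests above, a too-small source
	 * buffer yields -EACCES and a misaligned one yields -EINVAL.
	 */
	pattern[0] = 0x5555555555555555;
	if (bpf_cpumask_populate((struct cpumask *)mask, pattern, sizeof(pattern)))
		err = 1;

	bpf_cpumask_release(mask);
	return 0;
}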