The BPF cpumask selftests are supposed to be run twice, once to ensure
that they load properly and once to actually test their behavior. The
load test is triggered by annotating the tests with __success, while the
run test requires adding the name of the new test to
tools/testing/selftests/bpf/prog_tests/cpumask.c. However, most existing
tests are missing the __success annotation, and
test_refcount_null_tracking is missing from the main test file. Add the
missing annotations and test name.

Signed-off-by: Emil Tsalapatis (Meta) <emil@xxxxxxxxxxxxxxx>
---
 .../testing/selftests/bpf/prog_tests/cpumask.c |  1 +
 .../selftests/bpf/progs/cpumask_success.c      | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index 9b09beba988b..447a6e362fcd 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -25,6 +25,7 @@ static const char * const cpumask_success_testcases[] = {
 	"test_global_mask_nested_deep_rcu",
 	"test_global_mask_nested_deep_array_rcu",
 	"test_cpumask_weight",
+	"test_refcount_null_tracking",
 	"test_populate_reject_small_mask",
 	"test_populate_reject_unaligned",
 	"test_populate",
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 51f3dcf8869f..8abae7a59f92 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -136,6 +136,7 @@ static bool create_cpumask_set(struct bpf_cpumask **out1,
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -152,6 +153,7 @@ int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -181,6 +183,7 @@ int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -210,6 +213,7 @@ int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -249,6 +253,7 @@ int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *mask1, *mask2;
@@ -281,6 +286,7 @@ int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -313,6 +319,7 @@ int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
@@ -360,6 +367,7 @@ int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
@@ -402,6 +410,7 @@ int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
@@ -456,6 +465,7 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -471,6 +481,7 @@ int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *cpumask;
@@ -501,6 +512,7 @@ int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_fla
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *local, *prev;
@@ -534,6 +546,7 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *local, *prev;
@@ -632,12 +645,14 @@ static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
 {
 	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
 {
 	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
@@ -670,6 +685,7 @@ int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_fl
  * incorrect offset.
  */
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
 {
 	int r, i;
@@ -689,6 +705,7 @@ int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clo
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
 {
 	int i;
@@ -706,6 +723,7 @@ int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u
 }
 
 SEC("tp_btf/task_newtask")
+__success
 int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *local;
-- 
2.47.1
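For anyone adding further cases later, below is a minimal sketch of how a new
test would be wired up following the pattern above. The test name
"test_example_cpumask" and its body are hypothetical and only illustrate the
two required pieces: the __success annotation (from bpf_misc.h) on the BPF
program, and the matching entry in cpumask_success_testcases[] so the
prog_tests harness runs it in addition to loading it. The body assumes the
is_test_task() helper and err global already used by the other tests in
cpumask_success.c, and only exercises the bpf_cpumask_create()/
bpf_cpumask_set_cpu()/bpf_cpumask_release() kfuncs.

/* In tools/testing/selftests/bpf/progs/cpumask_success.c
 * (hypothetical example, not part of this patch)
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_example_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Only react to the task forked by the userspace test runner. */
	if (!is_test_task())
		return 0;

	cpumask = bpf_cpumask_create();
	if (!cpumask) {
		err = 1;
		return 0;
	}

	/* Exercise the mask, then drop the acquired reference. */
	bpf_cpumask_set_cpu(0, cpumask);
	bpf_cpumask_release(cpumask);

	return 0;
}

/* In tools/testing/selftests/bpf/prog_tests/cpumask.c, register the name so
 * the run test attaches the program and checks that err stays 0.
 */
static const char * const cpumask_success_testcases[] = {
	/* ... existing entries ... */
	"test_example_cpumask",
};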