On Wed, Mar 15, 2023 at 11:39 PM Manu Bretelle <chantr4@xxxxxxxxx> wrote:
>
> Currently, test_progs outputs all stdout/stderr as it runs, and when it
> is done, prints a summary.
>
> It is non-trivial for tooling to parse that output and extract meaningful
> information from it.
>
> This change adds a new option, `--json-summary`/`-J`, that lets the caller
> specify a file where `test_progs{,-no_alu32}` can write a summary of the
> run in a json format that can later be parsed by tooling.
>
> Currently, it creates a summary section with successes/skipped/failures
> followed by a list of failed tests/subtests.
>
> A test contains the following fields:
> - test_name: the name of the test
> - test_number: the number of the test
> - message: the log message that was printed by the test.
> - failed: a boolean indicating whether the test failed or not. Currently
>   we only output failed tests, but in the future, successful tests could
>   be added.
>
> A subtest contains the following fields:
> - test_name: same as above
> - test_number: same as above
> - subtest_name: the name of the subtest
> - subtest_number: the number of the subtest.
> - message: the log message that was printed by the subtest.
> - is_subtest: a boolean indicating that the entry is a subtest.

The "is_" prefix stands out compared to the other bool field ("failed");
should we call this just "subtest" for consistency? (Adjusted jq filter
sketched below.)

> - failed: same as above but for the subtest
>
> ```
> $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print $1","}' | tr -d '\n') -j -J /tmp/test_progs.json
> $ jq . < /tmp/test_progs.json | head -n 30
> $ head -n 30 /tmp/test_progs.json
> {
>     "success": 29,
>     "success_subtest": 23,
>     "skipped": 3,
>     "failed": 27,
>     "results": [{
>         "test_name": "bpf_cookie",
>         "test_number": 10,
>         "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n",
>         "failed": true
>     },{
>         "test_name": "bpf_cookie",
>         "subtest_name": "multi_kprobe_link_api",
>         "test_number": 10,
>         "subtest_number": 2,
>         "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi': -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3\n",
>         "is_subtest": true,
>         "failed": true
>     },{
>         "test_name": "bpf_cookie",
>         "subtest_name": "multi_kprobe_attach_api",
>         "test_number": 10,
>         "subtest_number": 3,
>         "message": "libbpf: extern 'bpf_testmod_fentry_test1' (strong): not resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi': -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3\n",
>         "is_subtest": true,
>         "failed": true
>     },{
>         "test_name": "bpf_cookie",
>         "subtest_name": "lsm",
>         "test_number": 10,
> ```
>
> The file can then be used to print a summary of the test run and list of
> failing tests/subtests:
>
> ```
> $ jq -r < /tmp/test_progs.json '"Success: \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed: \(.failed)"'
> Success: 29/23, Skipped: 3, Failed: 27
> $ jq -r < /tmp/test_progs.json '.results[] | if .is_subtest then "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)" else "#\(.test_number) \(.test_name)" end'
> ```
>
> Signed-off-by: Manu Bretelle <chantr4@xxxxxxxxx>
> ---

Looks great, some nits below.
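Re the "subtest" rename above: the consumer side would only need to drop
the prefix from the second jq filter in the commit message, something
like this (untested, just the existing filter with the field renamed):

```
$ jq -r < /tmp/test_progs.json '.results[] | if .subtest then "#\(.test_number)/\(.subtest_number) \(.test_name)/\(.subtest_name)" else "#\(.test_number) \(.test_name)" end'
```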
>  tools/testing/selftests/bpf/Makefile      |  4 +-
>  tools/testing/selftests/bpf/json_writer.c |  1 +
>  tools/testing/selftests/bpf/json_writer.h |  1 +
>  tools/testing/selftests/bpf/test_progs.c  | 83 +++++++++++++++++++++--
>  tools/testing/selftests/bpf/test_progs.h  |  1 +
>  5 files changed, 84 insertions(+), 6 deletions(-)
>  create mode 120000 tools/testing/selftests/bpf/json_writer.c
>  create mode 120000 tools/testing/selftests/bpf/json_writer.h

[...]

> @@ -269,10 +270,22 @@ static void print_subtest_name(int test_num, int subtest_num,
>  	fprintf(env.stdout, "\n");
>  }
>
> +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
> +{
> +	// open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a
> +	// null byte. Yet in parralel mode, log_buf will be NULL if there is no message.

Please don't use C++-style comments; let's use /* */ consistently.

Also, typo: parallel.

> +	if (log_cnt) {
> +		jsonw_string_field(w, "message", log_buf);
> +	} else {
> +		jsonw_string_field(w, "message", "");
> +	}
> +}
> +

[...]

> @@ -1283,7 +1327,7 @@ static void *dispatch_thread(void *ctx)
>  	} while (false);
>
>  	pthread_mutex_lock(&stdout_output_lock);
> -	dump_test_log(test, state, false, true);
> +	dump_test_log(test, state, false, true, NULL);
>  	pthread_mutex_unlock(&stdout_output_lock);
>  	} /* while (true) */
>  error:
> @@ -1322,8 +1366,28 @@ static void calculate_summary_and_print_errors(struct test_env *env)
>  			fail_cnt++;
>  		else
>  			succ_cnt++;
> +
>  	}
> +
> +	json_writer_t *w = NULL;

Please declare the variable at the top of the function to follow C89 style.

> +
> +	if (env->json) {
> +		w = jsonw_new(env->json);
> +		if (!w)
> +			fprintf(env->stderr, "Failed to create new JSON stream.");
> +	}
>
> +	if (w) {
> +		jsonw_pretty(w, 1);

Please pass true here, it's a bool.

> +		jsonw_start_object(w);
> +		jsonw_uint_field(w, "success", succ_cnt);
> +		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
> +		jsonw_uint_field(w, "skipped", skip_cnt);
> +		jsonw_uint_field(w, "failed", fail_cnt);
> +		jsonw_name(w, "results");
> +		jsonw_start_array(w);
> +
> +	}

[...]
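To make the nits concrete, here is a rough, untested sketch of the two
spots with them applied: /* */ comment style, the typos fixed (parallel,
and log_buf, which the comment spells "log_bug"), the declaration moved
to the top, and the bool argument. Unrelated lines are elided with
/* ... */ comments:

```
/* open_memstream (from stdio_hijack_init) ensures that log_buf is
 * terminated by a null byte. Yet in parallel mode, log_buf will be
 * NULL if there is no message.
 */
static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	if (log_cnt) {
		jsonw_string_field(w, "message", log_buf);
	} else {
		jsonw_string_field(w, "message", "");
	}
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	/* declared at the top of the function, C89 style */
	json_writer_t *w = NULL;

	/* ... existing success/skip/failure counting ... */

	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr, "Failed to create new JSON stream.");
	}

	if (w) {
		jsonw_pretty(w, true);	/* the second argument is a bool */
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}
	/* ... */
}
```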