The tests check if ctx and data are correctly prepared from ctx_in and
data_in, so accessing the ctx and using the bpf_perf_prog_read_value
work as expected.

Signed-off-by: Krzesimir Nowak <krzesimir@xxxxxxxxxx>
---
NOTE(review): the regs initialization below is x86-64 specific --
bpf_user_pt_regs_t has the r15..ss fields only on x86 -- so this
presumably needs an arch guard (or per-arch fixtures) before it can
build on other architectures; please confirm.

 tools/testing/selftests/bpf/test_verifier.c   | 48 ++++++++++
 .../selftests/bpf/verifier/perf_event_run.c   | 93 +++++++++++++++++++
 2 files changed, 141 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/verifier/perf_event_run.c

diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 05bad54f481f..6fa962014b64 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -293,6 +293,54 @@ static void bpf_fill_scale(struct bpf_test *self)
 	}
 }
 
+static void bpf_fill_perf_event_test_run_check(struct bpf_test *self)
+{
+	compiletime_assert(
+		sizeof(struct bpf_perf_event_data) <= TEST_CTX_LEN,
+		"buffer for ctx is too short to fit struct bpf_perf_event_data");
+	compiletime_assert(
+		sizeof(struct bpf_perf_event_value) <= TEST_DATA_LEN,
+		"buffer for data is too short to fit struct bpf_perf_event_value");
+
+	struct bpf_perf_event_data ctx = {
+		.regs = (bpf_user_pt_regs_t) {
+			.r15 = 1,
+			.r14 = 2,
+			.r13 = 3,
+			.r12 = 4,
+			.rbp = 5,
+			.rbx = 6,
+			.r11 = 7,
+			.r10 = 8,
+			.r9 = 9,
+			.r8 = 10,
+			.rax = 11,
+			.rcx = 12,
+			.rdx = 13,
+			.rsi = 14,
+			.rdi = 15,
+			.orig_rax = 16,
+			.rip = 17,
+			.cs = 18,
+			.eflags = 19,
+			.rsp = 20,
+			.ss = 21,
+		},
+		.sample_period = 1,
+		.addr = 2,
+	};
+	struct bpf_perf_event_value data = {
+		.counter = 1,
+		.enabled = 2,
+		.running = 3,
+	};
+
+	memcpy(self->ctx, &ctx, sizeof(ctx));
+	memcpy(self->data, &data, sizeof(data));
+	free(self->fill_insns);
+	self->fill_insns = NULL;
+}
+
 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
 #define BPF_SK_LOOKUP(func)					\
 	/* struct bpf_sock_tuple tuple = {} */			\
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_run.c b/tools/testing/selftests/bpf/verifier/perf_event_run.c
new file mode 100644
index 000000000000..d451932a6fc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/perf_event_run.c
@@ -0,0 +1,93 @@
+#define PER_LOAD_AND_CHECK_PTREG(PT_REG_FIELD, VALUE) \
+	PER_LOAD_AND_CHECK_CTX(offsetof(bpf_user_pt_regs_t, PT_REG_FIELD), VALUE)
+#define PER_LOAD_AND_CHECK_EVENT(PED_FIELD, VALUE) \
+	PER_LOAD_AND_CHECK_CTX(offsetof(struct bpf_perf_event_data, PED_FIELD), VALUE)
+#define PER_LOAD_AND_CHECK_CTX(OFFSET, VALUE) \
+	PER_LOAD_AND_CHECK_64(BPF_REG_4, BPF_REG_1, OFFSET, VALUE)
+#define PER_LOAD_AND_CHECK_VALUE(PEV_FIELD, VALUE) \
+	PER_LOAD_AND_CHECK_64(BPF_REG_7, BPF_REG_6, offsetof(struct bpf_perf_event_value, PEV_FIELD), VALUE)
+#define PER_LOAD_AND_CHECK_64(DST, SRC, OFFSET, VALUE) \
+	BPF_LDX_MEM(BPF_DW, DST, SRC, OFFSET), \
+	BPF_JMP_IMM(BPF_JEQ, DST, VALUE, 2), \
+	BPF_MOV64_IMM(BPF_REG_0, VALUE), \
+	BPF_EXIT_INSN()
+
+{
+	"check if regs contain expected values",
+	.insns = {
+	PER_LOAD_AND_CHECK_PTREG(r15, 1),
+	PER_LOAD_AND_CHECK_PTREG(r14, 2),
+	PER_LOAD_AND_CHECK_PTREG(r13, 3),
+	PER_LOAD_AND_CHECK_PTREG(r12, 4),
+	PER_LOAD_AND_CHECK_PTREG(rbp, 5),
+	PER_LOAD_AND_CHECK_PTREG(rbx, 6),
+	PER_LOAD_AND_CHECK_PTREG(r11, 7),
+	PER_LOAD_AND_CHECK_PTREG(r10, 8),
+	PER_LOAD_AND_CHECK_PTREG(r9, 9),
+	PER_LOAD_AND_CHECK_PTREG(r8, 10),
+	PER_LOAD_AND_CHECK_PTREG(rax, 11),
+	PER_LOAD_AND_CHECK_PTREG(rcx, 12),
+	PER_LOAD_AND_CHECK_PTREG(rdx, 13),
+	PER_LOAD_AND_CHECK_PTREG(rsi, 14),
+	PER_LOAD_AND_CHECK_PTREG(rdi, 15),
+	PER_LOAD_AND_CHECK_PTREG(orig_rax, 16),
+	PER_LOAD_AND_CHECK_PTREG(rip, 17),
+	PER_LOAD_AND_CHECK_PTREG(cs, 18),
+	PER_LOAD_AND_CHECK_PTREG(eflags, 19),
+	PER_LOAD_AND_CHECK_PTREG(rsp, 20),
+	PER_LOAD_AND_CHECK_PTREG(ss, 21),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
+	.fill_helper = bpf_fill_perf_event_test_run_check,
+},
+{
+	"check if sample period and addr contain expected values",
+	.insns = {
+	PER_LOAD_AND_CHECK_EVENT(sample_period, 1),
+	PER_LOAD_AND_CHECK_EVENT(addr, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
+	.fill_helper = bpf_fill_perf_event_test_run_check,
+},
+{
+	"check if bpf_perf_prog_read_value returns expected data",
+	.insns = {
+	// allocate space for a struct bpf_perf_event_value
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -(int)sizeof(struct bpf_perf_event_value)),
+	// prepare parameters for bpf_perf_prog_read_value(ctx, struct bpf_perf_event_value*, u32)
+	// BPF_REG_1 already contains the context
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_perf_event_value)),
+	BPF_EMIT_CALL(BPF_FUNC_perf_prog_read_value),
+	// check the return value
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	// check if the fields match the expected values
+	PER_LOAD_AND_CHECK_VALUE(counter, 1),
+	PER_LOAD_AND_CHECK_VALUE(enabled, 2),
+	PER_LOAD_AND_CHECK_VALUE(running, 3),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
+	.fill_helper = bpf_fill_perf_event_test_run_check,
+},
+#undef PER_LOAD_AND_CHECK_64
+#undef PER_LOAD_AND_CHECK_VALUE
+#undef PER_LOAD_AND_CHECK_CTX
+#undef PER_LOAD_AND_CHECK_EVENT
+#undef PER_LOAD_AND_CHECK_PTREG
-- 
2.20.1