bpf_get_cpu_cycles() is replaced with the rdtsc instruction on x86_64.
Add tests to check it.

Acked-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
Signed-off-by: Vadim Fedorenko <vadfed@xxxxxxxx>
---
 .../selftests/bpf/prog_tests/verifier.c       |   2 +
 .../selftests/bpf/progs/verifier_cpu_cycles.c | 104 ++++++++++++++++++
 2 files changed, 106 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/verifier_cpu_cycles.c

diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index d9f65adb456b..6cbb8949164a 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -98,6 +98,7 @@
 #include "verifier_xdp_direct_packet_access.skel.h"
 #include "verifier_bits_iter.skel.h"
 #include "verifier_lsm.skel.h"
+#include "verifier_cpu_cycles.skel.h"
 
 #define MAX_ENTRIES 11
 
@@ -225,6 +226,7 @@ void test_verifier_xdp(void) { RUN(verifier_xdp); }
 void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
 void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
 void test_verifier_lsm(void) { RUN(verifier_lsm); }
+void test_verifier_cpu_cycles(void) { RUN(verifier_cpu_cycles); }
 
 void test_verifier_mtu(void)
 {
diff --git a/tools/testing/selftests/bpf/progs/verifier_cpu_cycles.c b/tools/testing/selftests/bpf/progs/verifier_cpu_cycles.c
new file mode 100644
index 000000000000..88bfa7211858
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_cpu_cycles.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Inc. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+extern u64 bpf_cpu_cycles_to_ns(u64 cycles) __weak __ksym;
+extern u64 bpf_get_cpu_cycles(void) __weak __ksym;
+
+SEC("syscall")
+__arch_x86_64
+__xlated("0: call kernel-function")
+__naked int bpf_rdtsc(void)
+{
+	asm volatile(
+	"call %[bpf_get_cpu_cycles];"
+	"exit"
+	:
+	: __imm(bpf_get_cpu_cycles)
+	: __clobber_all
+	);
+}
+
+SEC("syscall")
+__arch_x86_64
+/* program entry for bpf_rdtsc_jit_x86_64(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+__jited(" endbr64")
+/* save RDX in R11 as it will be overwritten */
+__jited(" movq %rdx, %r11")
+/* lfence may not be executed depending on cpu features */
+__jited(" {{(lfence|)}}")
+__jited(" rdtsc")
+/* combine EDX:EAX into RAX */
+__jited(" shlq ${{(32|0x20)}}, %rdx")
+__jited(" orq %rdx, %rax")
+/* restore RDX from R11 */
+__jited(" movq %r11, %rdx")
+__jited(" leave")
+__naked int bpf_rdtsc_jit_x86_64(void)
+{
+	asm volatile(
+	"call %[bpf_get_cpu_cycles];"
+	"exit"
+	:
+	: __imm(bpf_get_cpu_cycles)
+	: __clobber_all
+	);
+}
+
+SEC("syscall")
+__arch_x86_64
+__xlated("0: r1 = 42")
+__xlated("1: call kernel-function")
+__naked int bpf_cyc2ns(void)
+{
+	asm volatile(
+	"r1 = 0x2a;"
+	"call %[bpf_cpu_cycles_to_ns];"
+	"exit"
+	:
+	: __imm(bpf_cpu_cycles_to_ns)
+	: __clobber_all
+	);
+}
+
+SEC("syscall")
+__arch_x86_64
+/* program entry for bpf_cyc2ns_jit_x86(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+__jited(" endbr64")
+/* load the constant argument into RDI, then mult/shift cycles to ns */
+__jited(" movabsq $0x2a2a2a2a2a, %rdi")
+__jited(" imulq ${{.*}}, %rdi, %rax")
+__jited(" shrq ${{.*}}, %rax")
+__jited(" leave")
+__naked int bpf_cyc2ns_jit_x86(void)
+{
+	asm volatile(
+	"r1 = 0x2a2a2a2a2a ll;"
+	"call %[bpf_cpu_cycles_to_ns];"
+	"exit"
+	:
+	: __imm(bpf_cpu_cycles_to_ns)
+	: __clobber_all
+	);
+}
+
+void rdtsc(void)
+{
+	bpf_get_cpu_cycles();
+	bpf_cpu_cycles_to_ns(42);
+}
+
+char _license[] SEC("license") = "GPL";
-- 
2.43.5
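
For context, here is a minimal usage sketch (not part of the patch) of
the two kfuncs under test: timing a section of BPF code and exposing
the result through a global. The program name measure, the global
elapsed_ns and the dummy loop are illustrative only; the extern
declarations match the ones used in the selftest above.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical usage sketch, not part of the selftest. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern u64 bpf_cpu_cycles_to_ns(u64 cycles) __weak __ksym;
extern u64 bpf_get_cpu_cycles(void) __weak __ksym;

u64 elapsed_ns; /* read from user space via the skeleton */

SEC("syscall")
int measure(void *ctx)
{
	volatile int i;
	u64 start, stop;

	start = bpf_get_cpu_cycles();	/* inlined as rdtsc on x86_64 */
	for (i = 0; i < 1000; i++)	/* dummy section being timed */
		;
	stop = bpf_get_cpu_cycles();
	elapsed_ns = bpf_cpu_cycles_to_ns(stop - start);
	return 0;
}

char _license[] SEC("license") = "GPL";

Since the externs are declared __weak, a program meant to also load on
kernels without these kfuncs would guard the calls with
bpf_ksym_exists() from bpf_helpers.h.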