The following tests are added to verifier_ldsx.c:
  - sign extension of data/data_end/data_meta for tcx programs.
    The actual checking is in bpf_skb_is_valid_access(), which is
    called by sk_filter, cg_skb, lwt, tc(tcx) and sk_skb programs.
  - sign extension of data/data_end/data_meta for xdp programs.
  - sign extension of data/data_end for flow_dissector programs.

All newly-added tests fail verification with the message
"invalid bpf_context access". Without the previous patch, all of
these tests passed verification.

Signed-off-by: Yonghong Song <yonghong.song@xxxxxxxxx>
---
 .../selftests/bpf/progs/verifier_ldsx.c      | 168 ++++++++++++++++++
 1 file changed, 168 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index d4427d8e1217..982b6c9c0b6d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -144,6 +144,174 @@ __naked void ldsx_s32_range(void)
 	: __clobber_all);
 }
 
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_1(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[xdp_md_data]);"
+	"r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_2(void)
+{
+	asm volatile (
+	"r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+	"r3 = *(s32 *)(r1 + %[xdp_md_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_3(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
+	"r3 = *(u32 *)(r1 + %[xdp_md_data]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)),
+	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
+	: __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_4(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+	"r3 = *(u32 *)(r1 + %[sk_buff_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)),
+	  __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	: __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_5(void)
+{
+	asm volatile (
+	"r2 = *(u32 *)(r1 + %[sk_buff_data]);"
+	"r3 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)),
+	  __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	: __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_6(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
+	"r3 = *(u32 *)(r1 + %[sk_buff_data]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta)),
+	  __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_7(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+	"r3 = *(u32 *)(r1 + %[sk_buff_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)),
+	  __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_8(void)
+{
+	asm volatile (
+	"r2 = *(u32 *)(r1 + %[sk_buff_data]);"
+	"r3 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+	"r1 = r2;"
+	"r1 += 8;"
+	"if r1 > r3 goto l0_%=;"
+	"r0 = *(u64 *)(r1 - 8);"
+"l0_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)),
+	  __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	: __clobber_all);
+}
+
 #else
 
 SEC("socket")
-- 
2.43.0
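
As context for reviewers: the "previous patch" referenced in the commit
message is not shown here; it puts the actual check in
bpf_skb_is_valid_access() in net/core/filter.c. A minimal sketch of the
shape such a check could take, assuming that patch marks sign-extending
(BPF_LDX | BPF_MEMSX) context loads via an is_ldsx field on struct
bpf_insn_access_aux (the field name is an assumption here, not a quote
from that patch):

	/* Sketch only, not the actual patch: reject sign-extending loads
	 * of the packet-pointer fields, since sign extension of the low
	 * 32 bits would corrupt the pointer that the verifier later
	 * rewrites this context access into.
	 */
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
		/* Same failure path as a mis-sized load: the program is
		 * rejected with "invalid bpf_context access".
		 */
		if (info->is_ldsx || size != size_default)
			return false;
		break;
	}

With the series applied, the new cases run with the rest of the verifier
tests, e.g. ./test_progs -t verifier_ldsx from tools/testing/selftests/bpf.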