Re: [PATCH v3 bpf-next 5/5] selftests/bpf: add selftest for bpf_xdp_ct_add and bpf_ct_refresh_timeout kfunc

On 5/18/22 3:43 AM, Lorenzo Bianconi wrote:
Introduce selftests for the following kfunc helpers:
- bpf_xdp_ct_add
- bpf_skb_ct_add
- bpf_ct_refresh_timeout

Signed-off-by: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
---
  .../testing/selftests/bpf/prog_tests/bpf_nf.c |  4 ++
  .../testing/selftests/bpf/progs/test_bpf_nf.c | 72 +++++++++++++++----
  2 files changed, 64 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index dd30b1e3a67c..be6c5650892f 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -39,6 +39,10 @@ void test_bpf_nf_ct(int mode)
  	ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id");
  	ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup");
  	ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple");
+	ASSERT_EQ(skel->bss->test_add_entry, 0, "Test for adding new entry");
+	ASSERT_EQ(skel->bss->test_succ_lookup, 0, "Test for successful lookup");

The default value of test_add_entry/test_succ_lookup is 0, so even if the program
didn't execute, the above assertions would still succeed. Initializing them to a
non-default (non-zero) value might be a better choice.
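For example (just a sketch, keeping the current global names; the sentinel
values are only illustrative):

	/* progs/test_bpf_nf.c: start from non-zero sentinels */
	int test_add_entry = -EINVAL;
	int test_succ_lookup = -ENOENT;

and clear them in the success branches of nf_ct_test(), e.g.:

	if (ct) {
		test_add_entry = 0;	/* add succeeded */
		...
	} else {
		test_add_entry = opts_def.error;
	}

That way the ASSERT_EQ(..., 0, ...) checks above only pass when the add/lookup
path was actually exercised.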

+	ASSERT_TRUE(skel->bss->test_delta_timeout > 9 && skel->bss->test_delta_timeout <= 10,
+		    "Test for ct timeout update");

ASSERT_TRUE(skel->bss->test_delta_timeout == 10, ...)? Could you add a comment
on why the value should be 10? It is not obvious from inspecting the code.
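Assuming the timeout argument of bpf_ct_refresh_timeout() is in milliseconds
(that is how I read the 10000 in the prog), a comment along these lines in
test_bpf_nf.c would make the expectation self-documenting:

	/* bpf_ct_refresh_timeout() pushes the entry's timeout 10000 msec,
	 * i.e. 10 * CONFIG_HZ jiffies, past "now"; converting the jiffies
	 * delta back to seconds should therefore give 10 (or 9 if a jiffy
	 * elapses between the refresh and the read, which is presumably
	 * why the current range check allows it).
	 */
	bpf_ct_refresh_timeout(ct_lk, 10000);
	test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
	test_delta_timeout /= CONFIG_HZ;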

  end:
  	test_bpf_nf__destroy(skel);
  }
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index f00a9731930e..361430dde3f7 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -1,6 +1,7 @@
  // SPDX-License-Identifier: GPL-2.0
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>

  #define EAFNOSUPPORT 97
  #define EPROTO 71
@@ -8,6 +9,8 @@
  #define EINVAL 22
  #define ENOENT 2
+extern unsigned long CONFIG_HZ __kconfig;
+
  int test_einval_bpf_tuple = 0;
  int test_einval_reserved = 0;
  int test_einval_netns_id = 0;
@@ -16,6 +19,9 @@ int test_eproto_l4proto = 0;
  int test_enonet_netns_id = 0;
  int test_enoent_lookup = 0;
  int test_eafnosupport = 0;
+int test_add_entry = 0;
+int test_succ_lookup = 0;
+u32 test_delta_timeout = 0;
  struct nf_conn;

@@ -26,31 +32,40 @@ struct bpf_ct_opts___local {
  	u8 reserved[3];
  } __attribute__((preserve_access_index));
+struct nf_conn *bpf_xdp_ct_add(struct xdp_md *, struct bpf_sock_tuple *, u32,
+			       struct bpf_ct_opts___local *, u32) __ksym;
  struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
  				  struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_add(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+			       struct bpf_ct_opts___local *, u32) __ksym;
  struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
  				  struct bpf_ct_opts___local *, u32) __ksym;
  void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_refresh_timeout(struct nf_conn *, u32) __ksym;

  static __always_inline void
-nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
-				   struct bpf_ct_opts___local *, u32),
+nf_ct_test(struct nf_conn *(*look_fn)(void *, struct bpf_sock_tuple *, u32,
+				      struct bpf_ct_opts___local *, u32),
+	   struct nf_conn *(*add_fn)(void *, struct bpf_sock_tuple *, u32,
+				     struct bpf_ct_opts___local *, u32),
  	   void *ctx)
  {
  	struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
  	struct bpf_sock_tuple bpf_tuple;
  	struct nf_conn *ct;
+	int err;
  	__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));

-	ct = func(ctx, NULL, 0, &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_einval_bpf_tuple = opts_def.error;

  	opts_def.reserved[0] = 1;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def));
  	opts_def.reserved[0] = 0;
  	opts_def.l4proto = IPPROTO_TCP;
  	if (ct)
@@ -59,21 +74,24 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
  		test_einval_reserved = opts_def.error;

  	opts_def.netns_id = -2;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def));
  	opts_def.netns_id = -1;
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_einval_netns_id = opts_def.error;

-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def) - 1);
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def) - 1);
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_einval_len_opts = opts_def.error;

  	opts_def.l4proto = IPPROTO_ICMP;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def));
  	opts_def.l4proto = IPPROTO_TCP;
  	if (ct)
  		bpf_ct_release(ct);
@@ -81,37 +99,67 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
  		test_eproto_l4proto = opts_def.error;

  	opts_def.netns_id = 0xf00f;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def));
  	opts_def.netns_id = -1;
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_enonet_netns_id = opts_def.error;

-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		     sizeof(opts_def));
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_enoent_lookup = opts_def.error;

-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def, sizeof(opts_def));
+	ct = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
+		     sizeof(opts_def));
  	if (ct)
  		bpf_ct_release(ct);
  	else
  		test_eafnosupport = opts_def.error;
+
+	bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+	bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+	bpf_tuple.ipv4.sport = bpf_htons(bpf_get_prandom_u32()); /* src port */
+	bpf_tuple.ipv4.dport = bpf_htons(bpf_get_prandom_u32()); /* dst port */

Since the value is already a random number, bpf_htons() is not needed here.
The bpf_endian.h include can then be dropped as well.
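I.e. something like (sketch):

	bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
	bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
	/* the low 16 bits of a random u32 are just as random; no
	 * byte-order conversion is needed for a random port
	 */
	bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
	bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */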

+
+	ct = add_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		    sizeof(opts_def));
+	if (ct) {
+		struct nf_conn *ct_lk;
+
+		ct_lk = look_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+				&opts_def, sizeof(opts_def));
+		if (ct_lk) {
+			/* update ct entry timeout */
+			bpf_ct_refresh_timeout(ct_lk, 10000);
+			test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
+			test_delta_timeout /= CONFIG_HZ;
+			bpf_ct_release(ct_lk);
+		} else {
+			test_succ_lookup = opts_def.error;
+		}
+		bpf_ct_release(ct);
+	} else {
+		test_add_entry = opts_def.error;
+		test_succ_lookup = opts_def.error;
+	}
  }
[...]



[Index of Archives]     [Netfitler Users]     [Berkeley Packet Filter]     [LARTC]     [Bugtraq]     [Yosemite Forum]

  Powered by Linux