On 8/4/21 12:08 AM, Kuniyuki Iwashima wrote:
If there are no abstract sockets, this prog outputs the same result
as /proc/net/unix.
# cat /sys/fs/bpf/unix | head -n 2
Num RefCount Protocol Flags Type St Inode Path
ffff9ab7122db000: 00000002 00000000 00010000 0001 01 10623 private/defer
# cat /proc/net/unix | head -n 2
Num RefCount Protocol Flags Type St Inode Path
ffff9ab7122db000: 00000002 00000000 00010000 0001 01 10623 private/defer
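
The /sys/fs/bpf/unix file above is a pinned iterator; for reference, it can
be created with bpftool before cat-ing it (the object file name here is only
illustrative):

# bpftool iter pin bpf_iter_unix.o /sys/fs/bpf/unix
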
According to the analysis by Yonghong Song (see the link), the BPF verifier
cannot load the commented-out code that prints the name of an abstract UNIX
domain socket because of an LLVM optimisation. It can be uncommented once
the LLVM code gen is improved.
I have pushed the llvm fix to llvm14 trunk
(https://reviews.llvm.org/D107483) and filed a request to backport it to
llvm13 (https://bugs.llvm.org/show_bug.cgi?id=51363). Could you uncomment
the "for" loop code in the next revision and test it with the latest llvm
trunk compiler? Please also add an entry to selftests/bpf/README.rst
mentioning that the llvm commit https://reviews.llvm.org/D107483 is needed
for the bpf_iter unix_socket selftest, otherwise users will see an error
like ...
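
For reference, the uncommented version of that loop would look roughly like
this (a sketch assembled from the commented-out code in the hunk below;
untested here):

        } else {
                int i, len;

                len = unix_sk->addr->len - sizeof(short);

                BPF_SEQ_PRINTF(seq, " @");

                /* unix_mkname() tests this upper bound. */
                if (len < sizeof(struct sockaddr_un))
                        for (i = 1; i < len; i++)
                                BPF_SEQ_PRINTF(seq, "%c",
                                               unix_sk->addr->name->sun_path[i] ?:
                                               '@');
        }
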
Link: https://lore.kernel.org/netdev/1994df05-8f01-371f-3c3b-d33d7836878c@xxxxxx/
Signed-off-by: Kuniyuki Iwashima <kuniyu@xxxxxxxxxxxx>
---
.../selftests/bpf/prog_tests/bpf_iter.c | 16 ++++
tools/testing/selftests/bpf/progs/bpf_iter.h | 8 ++
.../selftests/bpf/progs/bpf_iter_unix.c | 86 +++++++++++++++++++
.../selftests/bpf/progs/bpf_tracing_net.h | 4 +
4 files changed, 114 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_unix.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 1f1aade56504..77ac24b191d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -13,6 +13,7 @@
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
+#include "bpf_iter_unix.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
@@ -313,6 +314,19 @@ static void test_udp6(void)
bpf_iter_udp6__destroy(skel);
}
+static void test_unix(void)
+{
+ struct bpf_iter_unix *skel;
+
+ skel = bpf_iter_unix__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_unix);
+
+ bpf_iter_unix__destroy(skel);
+}
+
[...]
+ if (unix_sk->addr) {
+ if (!UNIX_ABSTRACT(unix_sk)) {
+ BPF_SEQ_PRINTF(seq, " %s", unix_sk->addr->name->sun_path);
+ } else {
+ BPF_SEQ_PRINTF(seq, " @");
+
+ /* The name of the abstract UNIX domain socket starts
+ * with '\0' and can contain '\0'. The null bytes
+ * should be escaped as done in unix_seq_show().
+ * However, the BPF verifier cannot load the code below
+ * because of the optimisation by LLVM. So, print only
+ * the first escaped byte here for now. Once LLVM code
+ * gen is improved, remove the BPF_SEQ_PRINTF() above
+ * and uncomment the code below.
+ *
+ * int i, len;
+ *
+ * len = unix_sk->addr->len - sizeof(short);
+ *
+ * BPF_SEQ_PRINTF(seq, " @");
+ *
+ * // unix_mkname() tests this upper bound.
+ * if (len < sizeof(struct sockaddr_un))
+ * for (i = 1 ; i < len; i++)
+ * BPF_SEQ_PRINTF(seq, "%c",
+ * unix_sk->addr->name->sun_path[i] ?:
+ * '@');
+ */
+ }
+ }
+
[...]
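
For anyone who wants to try the iterator outside the selftest harness, it can
be attached and read through the generated skeleton roughly as below. This is
only a sketch (do_dummy_read() in prog_tests/bpf_iter.c presumably does the
equivalent); none of it is code from this patch.

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_iter_unix.skel.h"

/* Minimal sketch, not code from this patch: attach the unix socket
 * iterator and dump its text output to stdout.
 */
int main(void)
{
        struct bpf_iter_unix *skel;
        struct bpf_link *link;
        char buf[4096];
        ssize_t n;
        int iter_fd;

        skel = bpf_iter_unix__open_and_load();
        if (!skel)
                return 1;

        /* NULL opts: iterate over all unix sockets. */
        link = bpf_program__attach_iter(skel->progs.dump_unix, NULL);
        if (libbpf_get_error(link))
                goto out;

        /* Create a seq_file-backed fd and read the formatted records. */
        iter_fd = bpf_iter_create(bpf_link__fd(link));
        if (iter_fd < 0)
                goto out_link;

        while ((n = read(iter_fd, buf, sizeof(buf) - 1)) > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);
        }

        close(iter_fd);
out_link:
        bpf_link__destroy(link);
out:
        bpf_iter_unix__destroy(skel);
        return 0;
}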