On 12/10/22 11:35 AM, Daan De Meyer wrote:
---
include/linux/bpf-cgroup.h | 124 +++++++++++++++++----------------
include/linux/filter.h | 1 +
include/uapi/linux/bpf.h | 1 +
kernel/bpf/cgroup.c | 3 +
net/core/filter.c | 51 ++++++++++++++
net/ipv4/af_inet.c | 9 +--
net/ipv4/ping.c | 2 +-
net/ipv4/tcp_ipv4.c | 2 +-
net/ipv4/udp.c | 11 +--
net/ipv6/af_inet6.c | 9 +--
net/ipv6/ping.c | 2 +-
net/ipv6/tcp_ipv6.c | 2 +-
net/ipv6/udp.c | 12 ++--
tools/include/uapi/linux/bpf.h | 1 +
14 files changed, 146 insertions(+), 84 deletions(-)
Again, please add a commit message for this patch. The same applies to
a few of the following patches.
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 57e9e109257e..3ab2f06ddc8a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -120,6 +120,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
+ int *uaddrlen,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
@@ -230,75 +231,76 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled(atype)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, NULL); \
- __ret; \
-})
-
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled(atype)) { \
- lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- t_ctx, NULL); \
- release_sock(sk); \
- } \
- __ret; \
-})
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
+ ({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr( \
+ sk, uaddr, uaddrlen, atype, NULL, NULL); \
+ __ret; \
+ })
+
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
+ ({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr( \
+ sk, uaddr, uaddrlen, atype, t_ctx, NULL); \
+ release_sock(sk); \
+ } \
+ __ret; \
+ })
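For context, here is a caller-side sketch of the new contract (the
function below is illustrative, not from the patch; the real call sites
are updated in the af_inet/udp/tcp/ping hunks):

/* Illustrative caller: the address length now travels by pointer so
 * that BPF programs can eventually rewrite it alongside the address
 * itself.
 */
static int example_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	int err;

	err = BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &addr_len,
				     CGROUP_INET4_CONNECT);
	if (err)
		return err;

	/* Proceed with the (possibly updated) addr_len. */
	return 0;
}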
[...]
diff --git a/net/core/filter.c b/net/core/filter.c
index 8607136b6e2c..d0620927dbca 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8876,6 +8876,13 @@ static bool sock_addr_is_valid_access(int off, int size,
return false;
info->reg_type = PTR_TO_SOCKET;
break;
+ case bpf_ctx_range(struct bpf_sock_addr, user_addrlen):
+ if (type != BPF_READ)
+ return false;
+
+ if (size != sizeof(__u32))
+ return false;
+ break;
default:
if (type == BPF_READ) {
if (size != size_default)
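From the BPF side, the new field can then be read like this (a sketch
against the patched UAPI; the program name and the cgroup/connect4
attach point are just examples):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* user_addrlen is read-only for now, and only full 32-bit loads are
 * accepted per the size check above.
 */
SEC("cgroup/connect4")
int check_addrlen(struct bpf_sock_addr *ctx)
{
	__u32 len = ctx->user_addrlen;

	/* 16 == sizeof(struct sockaddr_in): reject short addresses. */
	if (len < 16)
		return 0;	/* deny the connect() */
	return 1;		/* allow */
}

char _license[] SEC("license") = "GPL";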
@@ -9909,6 +9916,7 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
{
int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
struct bpf_insn *insn = insn_buf;
+ u32 read_size;
switch (si->off) {
case offsetof(struct bpf_sock_addr, user_family):
@@ -9986,6 +9994,49 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
si->dst_reg, si->src_reg,
offsetof(struct bpf_sock_addr_kern, sk));
break;
+
+ case offsetof(struct bpf_sock_addr, user_addrlen):
+ /* uaddrlen is a pointer, so it must be accessed via indirect
+ * loads and stores. For stores, an additional temporary
+ * register is used, since neither src_reg nor dst_reg may be
+ * clobbered.
+ */
+ if (type == BPF_WRITE) {
In sock_addr_is_valid_access() above, we have:
> + case bpf_ctx_range(struct bpf_sock_addr, user_addrlen):
> + if (type != BPF_READ)
> + return false;
> +
> + if (size != sizeof(__u32))
> + return false;
> + break;
So let's defer the BPF_WRITE handling here until writes are actually enabled.
+ int treg = BPF_REG_9;
+
+ if (si->src_reg == treg || si->dst_reg == treg)
+ --treg;
+ if (si->src_reg == treg || si->dst_reg == treg)
+ --treg;
+ *insn++ = BPF_STX_MEM(
+ BPF_DW, si->dst_reg, treg,
+ offsetof(struct bpf_sock_addr_kern, tmp_reg));
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern,
+ uaddrlen),
+ treg, si->dst_reg,
+ offsetof(struct bpf_sock_addr_kern, uaddrlen));
+ *insn++ = BPF_STX_MEM(
+ BPF_SIZEOF(u32), treg, si->src_reg,
+ bpf_ctx_narrow_access_offset(0, sizeof(u32),
+ sizeof(int)));
+ *insn++ = BPF_LDX_MEM(
+ BPF_DW, treg, si->dst_reg,
+ offsetof(struct bpf_sock_addr_kern, tmp_reg));
+ } else {
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern,
+ uaddrlen),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_addr_kern, uaddrlen));
+ read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
+ *insn++ = BPF_LDX_MEM(
+ BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
+ bpf_ctx_narrow_access_offset(0, read_size,
+ sizeof(int)));
+ }
+ *target_size = sizeof(u32);
+ break;
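For reference, a rough C rendering of what the rewritten read sequence
does (a sketch against the bpf_sock_addr_kern layout in this patch; the
helper name is made up):

/* Two dependent loads: uaddrlen lives in the kernel-side context as a
 * pointer, so the first load fetches the pointer and the second
 * dereferences it.
 */
static inline u32 user_addrlen_read(const struct bpf_sock_addr_kern *ctx)
{
	int *uaddrlen = ctx->uaddrlen;

	return *(u32 *)uaddrlen;
}

The write branch does the same in reverse, but has to spill a scratch
register to bpf_sock_addr_kern::tmp_reg first, since neither src_reg
nor dst_reg may be clobbered during the rewrite.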
}
return insn - insn_buf;
[...]