This patch adds a u8 migration field to struct sk_reuseport_kern and
struct sk_reuseport_md to signal to the eBPF program whether the kernel
is calling it to select a listener for a SYN, or to migrate sockets in
the accept queue or an immature socket during the 3WHS (TCP three-way
handshake).

Note that this field is accessible only if the attach type is
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE.

Link: https://lore.kernel.org/netdev/20201123003828.xjpjdtk4ygl6tg6h@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/
Suggested-by: Martin KaFai Lau <kafai@xxxxxx>
Signed-off-by: Kuniyuki Iwashima <kuniyu@xxxxxxxxxxxx>
---
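For illustration only (not part of the patch): a minimal sketch of a
program that consumes the new field. The SEC("sk_reuseport/migrate")
section name, the reuseport_map layout, and the assumption that
migration is zero when selecting a listener for a SYN and non-zero when
migrating are taken from the rest of this series or invented for the
example; only the md->migration access and its gating on
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE are introduced by this patch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} reuseport_map SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
	__u32 index = 0;	/* hypothetical slot for the new listener */

	if (md->migration) {
		/* The kernel is migrating a socket from the accept queue
		 * or an immature socket during the 3WHS: redirect it to
		 * the listener stored at index 0 of reuseport_map.
		 */
		if (bpf_sk_select_reuseport(md, &reuseport_map, &index, 0))
			return SK_DROP;
		return SK_PASS;
	}

	/* Listener selection for a SYN: returning SK_PASS without
	 * selecting a socket falls back to hash-based selection.
	 */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";

A program like this must be loaded with expected_attach_type set to
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE; otherwise the new check in
sk_reuseport_is_valid_access() in the diff below rejects any read of
md->migration at verification time.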
 include/linux/bpf.h            |  1 +
 include/linux/filter.h         |  4 ++--
 include/uapi/linux/bpf.h       |  1 +
 net/core/filter.c              | 15 ++++++++++++---
 net/core/sock_reuseport.c      |  2 +-
 tools/include/uapi/linux/bpf.h |  1 +
 6 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 581b2a2e78eb..244f823f1f84 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1897,6 +1897,7 @@ struct sk_reuseport_kern {
 	u32 hash;
 	u32 reuseport_id;
 	bool bind_inany;
+	u8 migration;
 };
 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
 				  struct bpf_insn_access_aux *info);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1b62397bd124..15d5bf13a905 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -967,12 +967,12 @@ void bpf_warn_invalid_xdp_action(u32 act);
 #ifdef CONFIG_INET
 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 				  struct bpf_prog *prog, struct sk_buff *skb,
-				  u32 hash);
+				  u32 hash, u8 migration);
 #else
 static inline struct sock *
 bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 		     struct bpf_prog *prog, struct sk_buff *skb,
-		     u32 hash)
+		     u32 hash, u8 migration)
 {
 	return NULL;
 }
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index cfc207ae7782..efe342bf3dbc 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4419,6 +4419,7 @@ struct sk_reuseport_md {
 	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
 	__u32 bind_inany;	/* Is sock bound to an INANY address? */
 	__u32 hash;		/* A hash of the packet 4 tuples */
+	__u8 migration;		/* Migration type */
 };
 
 #define BPF_TAG_SIZE	8
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ca5eecebacf..0a0634787bb4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9853,7 +9853,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
 				    struct sock_reuseport *reuse,
 				    struct sock *sk, struct sk_buff *skb,
-				    u32 hash)
+				    u32 hash, u8 migration)
 {
 	reuse_kern->skb = skb;
 	reuse_kern->sk = sk;
@@ -9862,16 +9862,17 @@ static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
 	reuse_kern->hash = hash;
 	reuse_kern->reuseport_id = reuse->reuseport_id;
 	reuse_kern->bind_inany = reuse->bind_inany;
+	reuse_kern->migration = migration;
 }
 
 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 				  struct bpf_prog *prog, struct sk_buff *skb,
-				  u32 hash)
+				  u32 hash, u8 migration)
 {
 	struct sk_reuseport_kern reuse_kern;
 	enum sk_action action;
 
-	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
+	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash, migration);
 	action = BPF_PROG_RUN(prog, &reuse_kern);
 
 	if (action == SK_PASS)
@@ -10010,6 +10011,10 @@ sk_reuseport_is_valid_access(int off, int size,
 	case offsetof(struct sk_reuseport_md, hash):
 		return size == size_default;
 
+	case bpf_ctx_range(struct sk_reuseport_md, migration):
+		return prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE &&
+		       size == sizeof(__u8);
+
 	/* Fields that allow narrowing */
 	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
 		if (size < sizeof_field(struct sk_buff, protocol))
@@ -10082,6 +10087,10 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
 	case offsetof(struct sk_reuseport_md, bind_inany):
 		SK_REUSEPORT_LOAD_FIELD(bind_inany);
 		break;
+
+	case offsetof(struct sk_reuseport_md, migration):
+		SK_REUSEPORT_LOAD_FIELD(migration);
+		break;
 	}
 
 	return insn - insn_buf;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index b4fe0829c9ab..96d65b4c6974 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -349,7 +349,7 @@ struct sock *__reuseport_select_sock(struct sock *sk, u32 hash,
 		goto select_by_hash;
 
 	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
-		sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
+		sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash, migration);
 	else
 		sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
 
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index cfc207ae7782..efe342bf3dbc 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4419,6 +4419,7 @@ struct sk_reuseport_md {
 	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
 	__u32 bind_inany;	/* Is sock bound to an INANY address? */
 	__u32 hash;		/* A hash of the packet 4 tuples */
+	__u8 migration;		/* Migration type */
 };
 
 #define BPF_TAG_SIZE	8
-- 
2.17.2 (Apple Git-113)