Add link support for sk_msg program. This will make user space easy to manage as most commonly used programs already have link support. Signed-off-by: Yonghong Song <yonghong.song@xxxxxxxxx> --- include/linux/bpf.h | 13 +++ include/uapi/linux/bpf.h | 5 ++ kernel/bpf/syscall.c | 3 + net/core/skmsg.c | 153 +++++++++++++++++++++++++++++++++ net/core/sock_map.c | 6 +- tools/include/uapi/linux/bpf.h | 5 ++ 6 files changed, 181 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 785660810e6a..a3112a295f5c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2982,10 +2982,14 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); void sock_map_unhash(struct sock *sk); void sock_map_destroy(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); + +int bpf_skmsg_link_create(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, struct bpf_prog_aux *prog_aux) @@ -3080,6 +3084,15 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, { return -EINVAL; } +static inline int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which) +{ + return -EOPNOTSUPP; +} +static inline int bpf_skmsg_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a241f407c234..c7d2a5fcf37a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1129,6 +1129,7 @@ enum bpf_link_type { BPF_LINK_TYPE_TCX = 11,
BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SK_MSG = 14, __MAX_BPF_LINK_TYPE, }; @@ -6699,6 +6700,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } skmsg; }; } __attribute__((aligned(8))); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b2750b79ac80..7fd3e6c93612 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -5155,6 +5155,9 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; + case BPF_PROG_TYPE_SK_MSG: + ret = bpf_skmsg_link_create(attr, prog); + break; #ifdef CONFIG_NET case BPF_PROG_TYPE_XDP: ret = bpf_xdp_link_attach(attr, prog); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 4d75ef9d24bf..2e3d15294966 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1256,3 +1256,167 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock) sk->sk_data_ready = psock->saved_data_ready; psock->saved_data_ready = NULL; } + +struct bpf_skmsg_link { + struct bpf_link link; + struct bpf_map *map; + enum bpf_attach_type attach_type; +}; + +static DEFINE_MUTEX(link_mutex); + +static struct bpf_skmsg_link *bpf_skmsg_link(const struct bpf_link *link) +{ + return container_of(link, struct bpf_skmsg_link, link); +} + +static void bpf_skmsg_link_release(struct bpf_link *link) +{ + struct bpf_skmsg_link *skmsg_link = bpf_skmsg_link(link); + + mutex_lock(&link_mutex); + if (skmsg_link->map) { + (void)sock_map_prog_update(skmsg_link->map, NULL, link->prog, + skmsg_link->attach_type); + bpf_map_put_with_uref(skmsg_link->map); + skmsg_link->map = NULL; + } + mutex_unlock(&link_mutex); +} + +static int bpf_skmsg_link_detach(struct bpf_link *link) +{ + bpf_skmsg_link_release(link); + return 0; +} + +static void bpf_skmsg_link_dealloc(struct bpf_link *link) +{ + kfree(bpf_skmsg_link(link)); +} + +static int bpf_skmsg_link_update_prog(struct
 bpf_link *link, + struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + const struct bpf_skmsg_link *skmsg_link = bpf_skmsg_link(link); + int ret = 0; + + mutex_lock(&link_mutex); + /* The link may already have been auto-detached by release(). */ + if (!skmsg_link->map) { + ret = -ENOLINK; + goto out; + } + + if (old_prog && link->prog != old_prog) { + ret = -EPERM; + goto out; + } + + if (link->prog->type != new_prog->type) { + ret = -EINVAL; + goto out; + } + + ret = sock_map_prog_update(skmsg_link->map, new_prog, link->prog, + skmsg_link->attach_type); + if (ret) + goto out; + + /* The link owns a reference on the program it points to. */ + bpf_prog_inc(new_prog); + old_prog = xchg(&link->prog, new_prog); + bpf_prog_put(old_prog); + +out: + mutex_unlock(&link_mutex); + return ret; +} + +static int bpf_skmsg_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct bpf_skmsg_link *skmsg_link = bpf_skmsg_link(link); + u32 map_id = 0; + + mutex_lock(&link_mutex); + if (skmsg_link->map) + map_id = skmsg_link->map->id; + mutex_unlock(&link_mutex); + + info->skmsg.map_id = map_id; + info->skmsg.attach_type = skmsg_link->attach_type; + return 0; +} + +static void bpf_skmsg_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + const struct bpf_skmsg_link *skmsg_link = bpf_skmsg_link(link); + u32 map_id = 0; + + mutex_lock(&link_mutex); + if (skmsg_link->map) + map_id = skmsg_link->map->id; + mutex_unlock(&link_mutex); + + seq_printf(seq, "map_id:\t%u\n", map_id); + seq_printf(seq, "attach_type:\t%u (...)\n", skmsg_link->attach_type); +} + +static const struct bpf_link_ops bpf_skmsg_link_ops = { + .release = bpf_skmsg_link_release, + .dealloc = bpf_skmsg_link_dealloc, + .detach = bpf_skmsg_link_detach, + .update_prog = bpf_skmsg_link_update_prog, + .fill_link_info = bpf_skmsg_link_fill_info, + .show_fdinfo = bpf_skmsg_link_show_fdinfo, +}; + +int bpf_skmsg_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct bpf_skmsg_link *skmsg_link; + enum bpf_attach_type attach_type; + struct bpf_map *map; + int ret; + + if (attr->link_create.flags) + return -EINVAL; + + map = bpf_map_get_with_uref(attr->link_create.target_fd); + if
(IS_ERR(map)) + return PTR_ERR(map); + + skmsg_link = kzalloc(sizeof(*skmsg_link), GFP_USER); + if (!skmsg_link) { + ret = -ENOMEM; + goto out; + } + + attach_type = attr->link_create.attach_type; + bpf_link_init(&skmsg_link->link, BPF_LINK_TYPE_SK_MSG, &bpf_skmsg_link_ops, prog); + skmsg_link->map = map; + skmsg_link->attach_type = attach_type; + + ret = bpf_link_prime(&skmsg_link->link, &link_primer); + if (ret) { + kfree(skmsg_link); + goto out; + } + + ret = sock_map_prog_update(map, prog, NULL, attach_type); + if (ret) { + bpf_link_cleanup(&link_primer); + goto out; + } + + bpf_prog_inc(prog); + + return bpf_link_settle(&link_primer); + +out: + bpf_map_put_with_uref(map); + return ret; +} diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 27d733c0f65e..63372bc368f1 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -24,8 +24,6 @@ struct bpf_stab { #define SOCK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) -static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which); static struct sk_psock_progs *sock_map_progs(struct bpf_map *map); static struct bpf_map *sock_map_alloc(union bpf_attr *attr) @@ -1488,8 +1486,8 @@ static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog, return 0; } -static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which) +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which) { struct bpf_prog **pprog; int ret; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a241f407c234..c7d2a5fcf37a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1129,6 +1129,7 @@ enum bpf_link_type { BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SK_MSG = 14, __MAX_BPF_LINK_TYPE, }; @@ -6699,6 +6700,10 @@ struct bpf_link_info { __u32 
ifindex; __u32 attach_type; } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } skmsg; }; } __attribute__((aligned(8))); -- 2.43.0