devtx is a lightweight set of hooks that run before and after packet
transmission. The hooks work for both the skb and xdp paths by exposing
a lightweight packet wrapper, struct devtx_frame (header portion +
frags).

devtx hooks are implemented as tracing programs which have access to
XDP-metadata-like kfuncs. The initial set of kfuncs is implemented in
the next patch, but the idea is similar to XDP metadata: the kfuncs
have netdev-specific implementations but a common interface. Upon
program load, the kfuncs are resolved to direct calls against the
per-netdev implementation. This is achieved by marking devtx tracing
programs as dev-bound (largely reusing the xdp-dev-bound program
infrastructure).

Cc: netdev@xxxxxxxxxxxxxxx
Signed-off-by: Stanislav Fomichev <sdf@xxxxxxxxxx>
---
 MAINTAINERS          |  2 ++
 include/net/devtx.h  | 71 +++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/offload.c | 15 +++++++++
 net/core/Makefile    |  1 +
 net/core/dev.c       |  1 +
 net/core/devtx.c     | 76 ++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 166 insertions(+)
 create mode 100644 include/net/devtx.h
 create mode 100644 net/core/devtx.c

diff --git a/MAINTAINERS b/MAINTAINERS
index c904dba1733b..516529b42e66 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22976,11 +22976,13 @@ L:	bpf@xxxxxxxxxxxxxxx
 S:	Supported
 F:	drivers/net/ethernet/*/*/*/*/*xdp*
 F:	drivers/net/ethernet/*/*/*xdp*
+F:	include/net/devtx.h
 F:	include/net/xdp.h
 F:	include/net/xdp_priv.h
 F:	include/trace/events/xdp.h
 F:	kernel/bpf/cpumap.c
 F:	kernel/bpf/devmap.c
+F:	net/core/devtx.c
 F:	net/core/xdp.c
 F:	samples/bpf/xdp*
 F:	tools/testing/selftests/bpf/*/*xdp*
diff --git a/include/net/devtx.h b/include/net/devtx.h
new file mode 100644
index 000000000000..d1c75fd9b377
--- /dev/null
+++ b/include/net/devtx.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_NET_DEVTX_H__
+#define __LINUX_NET_DEVTX_H__
+
+#include <linux/jump_label.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/btf_ids.h>
+#include <net/xdp.h>
+
+struct devtx_frame {
+	void *data;
+	u16 len;
+	u8 meta_len;
+	struct skb_shared_info *sinfo; /* for frags */
+	struct net_device *netdev;
+};
+
+#ifdef CONFIG_NET
+void devtx_hooks_enable(void);
+void devtx_hooks_disable(void);
+bool devtx_hooks_match(u32 attach_btf_id, const struct xdp_metadata_ops *xmo);
+int devtx_hooks_register(struct btf_id_set8 *set, const struct xdp_metadata_ops *xmo);
+void devtx_hooks_unregister(struct btf_id_set8 *set);
+
+static inline void devtx_frame_from_skb(struct devtx_frame *ctx, struct sk_buff *skb,
+					struct net_device *netdev)
+{
+	ctx->data = skb->data;
+	ctx->len = skb_headlen(skb);
+	ctx->meta_len = skb_metadata_len(skb);
+	ctx->sinfo = skb_shinfo(skb);
+	ctx->netdev = netdev;
+}
+
+static inline void devtx_frame_from_xdp(struct devtx_frame *ctx, struct xdp_frame *xdpf,
+					struct net_device *netdev)
+{
+	ctx->data = xdpf->data;
+	ctx->len = xdpf->len;
+	ctx->meta_len = xdpf->metasize & 0xff;
+	ctx->sinfo = xdp_frame_has_frags(xdpf) ? xdp_get_shared_info_from_frame(xdpf) : NULL;
+	ctx->netdev = netdev;
+}
+
+DECLARE_STATIC_KEY_FALSE(devtx_enabled_key);
+
+static inline bool devtx_enabled(void)
+{
+	return static_branch_unlikely(&devtx_enabled_key);
+}
+#else
+static inline void devtx_hooks_enable(void) {}
+static inline void devtx_hooks_disable(void) {}
+static inline bool devtx_hooks_match(u32 attach_btf_id, const struct xdp_metadata_ops *xmo) { return false; }
+static inline int devtx_hooks_register(struct btf_id_set8 *set,
+				       const struct xdp_metadata_ops *xmo) { return -EOPNOTSUPP; }
+static inline void devtx_hooks_unregister(struct btf_id_set8 *set) {}
+
+static inline void devtx_frame_from_skb(struct devtx_frame *ctx, struct sk_buff *skb,
+					struct net_device *netdev) {}
+static inline void devtx_frame_from_xdp(struct devtx_frame *ctx, struct xdp_frame *xdpf,
+					struct net_device *netdev) {}
+
+static inline bool devtx_enabled(void)
+{
+	return false;
+}
+#endif
+
+#endif /* __LINUX_NET_DEVTX_H__ */
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 235d81f7e0ed..f01a1aa0f627 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -25,6 +25,7 @@
 #include <linux/rhashtable.h>
 #include <linux/rtnetlink.h>
 #include <linux/rwsem.h>
+#include <net/devtx.h>
 
 /* Protects offdevs, members of bpf_offload_netdev and offload members
  * of all progs.
@@ -228,6 +229,7 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
 	int err;
 
 	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+	    attr->prog_type != BPF_PROG_TYPE_TRACING &&
 	    attr->prog_type != BPF_PROG_TYPE_XDP)
 		return -EINVAL;
 
@@ -242,6 +244,15 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
 	if (!netdev)
 		return -EINVAL;
 
+	/* Make sure device-bound tracing programs are being attached
+	 * to the appropriate netdev.
+	 */
+	if (attr->prog_type == BPF_PROG_TYPE_TRACING &&
+	    !devtx_hooks_match(prog->aux->attach_btf_id, netdev->xdp_metadata_ops)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = bpf_dev_offload_check(netdev);
 	if (err)
 		goto out;
@@ -252,6 +263,9 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
 	err = __bpf_prog_dev_bound_init(prog, netdev);
 	up_write(&bpf_devs_lock);
 
+	if (!err)
+		devtx_hooks_enable();
+
 out:
 	dev_put(netdev);
 	return err;
@@ -384,6 +398,7 @@ void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
 		ondev = bpf_offload_find_netdev(netdev);
 		if (!ondev->offdev && list_empty(&ondev->progs))
 			__bpf_offload_dev_netdev_unregister(NULL, netdev);
+		devtx_hooks_disable();
 	}
 	up_write(&bpf_devs_lock);
 	rtnl_unlock();
diff --git a/net/core/Makefile b/net/core/Makefile
index 8f367813bc68..c1db05ccfac7 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -39,4 +39,5 @@ obj-$(CONFIG_FAILOVER) += failover.o
 obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
 obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
 obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
+obj-$(CONFIG_BPF_SYSCALL) += devtx.o
 obj-$(CONFIG_OF) += of_net.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 3393c2f3dbe8..e2f4618ee1c5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -150,6 +150,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/prandom.h>
 #include <linux/once_lite.h>
+#include <net/devtx.h>
 
 #include "dev.h"
 #include "net-sysfs.h"
diff --git a/net/core/devtx.c b/net/core/devtx.c
new file mode 100644
index 000000000000..bad694439ae3
--- /dev/null
+++ b/net/core/devtx.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/devtx.h>
+#include <linux/filter.h>
+
+DEFINE_STATIC_KEY_FALSE(devtx_enabled_key);
+EXPORT_SYMBOL_GPL(devtx_enabled_key);
+
+struct devtx_hook_entry {
+	struct list_head devtx_hooks;
+	struct btf_id_set8 *set;
+	const struct xdp_metadata_ops *xmo;
+};
+
+static LIST_HEAD(devtx_hooks);
+static DEFINE_MUTEX(devtx_hooks_lock);
+
+void devtx_hooks_enable(void)
+{
+	static_branch_inc(&devtx_enabled_key);
+}
+
+void devtx_hooks_disable(void)
+{
+	static_branch_dec(&devtx_enabled_key);
+}
+
+bool devtx_hooks_match(u32 attach_btf_id, const struct xdp_metadata_ops *xmo)
+{
+	struct devtx_hook_entry *entry, *tmp;
+	bool match = false;
+
+	mutex_lock(&devtx_hooks_lock);
+	list_for_each_entry_safe(entry, tmp, &devtx_hooks, devtx_hooks) {
+		if (btf_id_set8_contains(entry->set, attach_btf_id)) {
+			match = entry->xmo == xmo;
+			break;
+		}
+	}
+	mutex_unlock(&devtx_hooks_lock);
+
+	return match;
+}
+
+int devtx_hooks_register(struct btf_id_set8 *set, const struct xdp_metadata_ops *xmo)
+{
+	struct devtx_hook_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->set = set;
+	entry->xmo = xmo;
+
+	mutex_lock(&devtx_hooks_lock);
+	list_add(&entry->devtx_hooks, &devtx_hooks);
+	mutex_unlock(&devtx_hooks_lock);
+
+	return 0;
+}
+
+void devtx_hooks_unregister(struct btf_id_set8 *set)
+{
+	struct devtx_hook_entry *entry, *tmp;
+
+	mutex_lock(&devtx_hooks_lock);
+	list_for_each_entry_safe(entry, tmp, &devtx_hooks, devtx_hooks) {
+		if (entry->set == set) {
+			list_del(&entry->devtx_hooks);
+			kfree(entry);
+			break;
+		}
+	}
+	mutex_unlock(&devtx_hooks_lock);
+}
-- 
2.41.0.162.gfafddb0af9-goog
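
For illustration, a driver would be expected to plug into this roughly
as follows: register the BTF id set of its devtx kfuncs together with
its xdp_metadata_ops (so devtx_hooks_match() only accepts dev-bound
tracing programs targeting netdevs served by that driver), and guard
the TX path with devtx_enabled() before building a devtx_frame. This
is only a sketch; all mydrv_* names are placeholders and the actual
kfuncs and hook call sites arrive in the following patches.

/* Hypothetical driver-side sketch; mydrv_* names are placeholders. */

#include <net/devtx.h>
#include <linux/btf_ids.h>

/* The driver's existing xdp_metadata_ops, defined elsewhere in the
 * driver; devtx reuses the same struct to tie TX kfuncs to the netdev.
 */
extern const struct xdp_metadata_ops mydrv_xdp_metadata_ops;

/* BTF id set of the driver's devtx kfunc implementations; the kfuncs
 * themselves are added by a later patch, so the set is empty here.
 */
BTF_SET8_START(mydrv_devtx_kfunc_ids)
BTF_SET8_END(mydrv_devtx_kfunc_ids)

static int mydrv_devtx_init(void)
{
	/* Let devtx_hooks_match() accept dev-bound tracing programs whose
	 * attach_btf_id belongs to this set only for this driver's netdevs.
	 */
	return devtx_hooks_register(&mydrv_devtx_kfunc_ids,
				    &mydrv_xdp_metadata_ops);
}

static void mydrv_devtx_exit(void)
{
	devtx_hooks_unregister(&mydrv_devtx_kfunc_ids);
}

static void mydrv_tx_submit(struct sk_buff *skb, struct net_device *netdev)
{
	/* Cheap static-branch check: only pay for the wrapper when at
	 * least one dev-bound tracing program has been loaded.
	 */
	if (devtx_enabled()) {
		struct devtx_frame frame;

		devtx_frame_from_skb(&frame, skb, netdev);
		/* ...invoke the (to-be-added) devtx submit hook on &frame... */
	}
}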