From: Toke Høiland-Jørgensen <toke@xxxxxxxxxx>

Instead of rejecting the attaching of PROG_TYPE_EXT programs to XDP
programs that consume HW metadata, implement support for propagating
the offload information. The extension program doesn't need to set a
flag or ifindex; these will just be propagated from the target by the
verifier. We do need to create a separate offload object for the
extension program, though, since it can be reattached to a different
program later (which means we can't just inherit the offload
information from the target).

An additional check is added at attach time to verify that the new
target is compatible with the offload information in the extension
program.

Signed-off-by: Toke Høiland-Jørgensen <toke@xxxxxxxxxx>
Signed-off-by: Stanislav Fomichev <sdf@xxxxxxxxxx>
---
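As a rough usage sketch (not part of this patch; the object, program
and interface names below are made up for illustration), loading a
dev-bound XDP program plus an extension for it could look something
like the following from userspace. It assumes a libbpf that exposes
bpf_program__set_ifindex(), bpf_program__set_flags() and the
BPF_F_XDP_DEV_BOUND_ONLY flag added earlier in this series; error
handling is omitted for brevity:

  #include <net/if.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
  	struct bpf_object *obj, *ext_obj;
  	struct bpf_program *xdp, *ext;

  	/* Dev-bind (but don't offload) the XDP program so it can use
  	 * the HW metadata kfuncs of this device. */
  	obj = bpf_object__open_file("xdp_meta.bpf.o", NULL);
  	xdp = bpf_object__find_program_by_name(obj, "xdp_meta");
  	bpf_program__set_ifindex(xdp, if_nametoindex("eth0"));
  	bpf_program__set_flags(xdp, BPF_F_XDP_DEV_BOUND_ONLY);
  	bpf_object__load(obj);

  	/* The extension sets no flag or ifindex of its own;
  	 * bpf_prog_dev_bound_inherit() copies the dev-bound info
  	 * from the attach target at load time. */
  	ext_obj = bpf_object__open_file("xdp_ext.bpf.o", NULL);
  	ext = bpf_object__find_program_by_name(ext_obj, "xdp_ext_func");
  	bpf_program__set_attach_target(ext, bpf_program__fd(xdp),
  				       "xdp_meta");
  	bpf_object__load(ext_obj);
  	return 0;
  }

Re-attaching the extension later (e.g. with
bpf_program__attach_freplace()) to a program bound to a different
device, or to one that is not dev-bound at all, is rejected with
-EINVAL by the new bpf_prog_dev_bound_match() check in
bpf_tracing_prog_attach().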
 include/linux/bpf.h   |  14 ++++++
 kernel/bpf/offload.c  | 107 ++++++++++++++++++++++++++++++++----------
 kernel/bpf/syscall.c  |  12 +++++
 kernel/bpf/verifier.c |   5 --
 4 files changed, 109 insertions(+), 29 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 969f53691dd4..f1bacd8ee402 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2482,6 +2482,7 @@ void unpriv_ebpf_notify(int new_state);
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
 
 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
@@ -2494,6 +2495,8 @@ static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
 	return aux->offload_requested;
 }
 
+bool bpf_prog_dev_bound_match(struct bpf_prog *lhs, struct bpf_prog *rhs);
+
 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 {
 	return unlikely(map->ops == &bpf_map_offload_ops);
@@ -2527,6 +2530,12 @@ static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
 	return -EOPNOTSUPP;
 }
 
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+					     struct bpf_prog *old_prog)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
 {
 }
@@ -2541,6 +2550,11 @@ static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
 	return false;
 }
 
+static inline bool bpf_prog_dev_bound_match(struct bpf_prog *lhs, struct bpf_prog *rhs)
+{
+	return false;
+}
+
 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 {
 	return false;
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 0e3fc743e0a8..60978a1f9baa 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -187,17 +187,13 @@ static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
 	kfree(ondev);
 }
 
-int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
+static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
 {
 	struct bpf_offload_netdev *ondev;
 	struct bpf_prog_offload *offload;
 	int err;
 
-	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
-	    attr->prog_type != BPF_PROG_TYPE_XDP)
-		return -EINVAL;
-
-	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
+	if (!netdev)
 		return -EINVAL;
 
 	offload = kzalloc(sizeof(*offload), GFP_USER);
@@ -205,21 +201,13 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
 		return -ENOMEM;
 
 	offload->prog = prog;
+	offload->netdev = netdev;
 
-	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
-					   attr->prog_ifindex);
-	err = bpf_dev_offload_check(offload->netdev);
-	if (err)
-		goto err_maybe_put;
-
-	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);
-
-	down_write(&bpf_devs_lock);
 	ondev = bpf_offload_find_netdev(offload->netdev);
 	if (!ondev) {
 		if (bpf_prog_is_offloaded(prog->aux)) {
 			err = -EINVAL;
-			goto err_unlock;
+			goto err_free;
 		}
 
 		/* When only binding to the device, explicitly
@@ -227,25 +215,80 @@
 		 */
 		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
 		if (err)
-			goto err_unlock;
+			goto err_free;
 		ondev = bpf_offload_find_netdev(offload->netdev);
 	}
 	offload->offdev = ondev->offdev;
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &ondev->progs);
-	dev_put(offload->netdev);
-	up_write(&bpf_devs_lock);
 	return 0;
-err_unlock:
-	up_write(&bpf_devs_lock);
-err_maybe_put:
-	if (offload->netdev)
-		dev_put(offload->netdev);
+err_free:
 	kfree(offload);
 	return err;
 }
 
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
+{
+	struct net_device *netdev;
+	int err;
+
+	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+	    attr->prog_type != BPF_PROG_TYPE_XDP)
+		return -EINVAL;
+
+	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
+		return -EINVAL;
+
+	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
+	if (!netdev)
+		return -EINVAL;
+
+	down_write(&bpf_devs_lock);
+	err = bpf_dev_offload_check(netdev);
+	if (err)
+		goto out;
+
+	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);
+
+	err = __bpf_prog_dev_bound_init(prog, netdev);
+	if (err)
+		goto out;
+
+out:
+	dev_put(netdev);
+	up_write(&bpf_devs_lock);
+	return err;
+}
+
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
+{
+	int err;
+
+	if (!bpf_prog_is_dev_bound(old_prog->aux))
+		return 0;
+
+	if (bpf_prog_is_offloaded(old_prog->aux))
+		return -EINVAL;
+
+	down_write(&bpf_devs_lock);
+	if (!old_prog->aux->offload) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
+	new_prog->aux->offload_requested = old_prog->aux->offload_requested;
+
+	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);
+	if (err)
+		goto out;
+
+out:
+	up_write(&bpf_devs_lock);
+	return err;
+}
+
 int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
 {
 	struct bpf_prog_offload *offload;
@@ -687,6 +730,22 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
 
+bool bpf_prog_dev_bound_match(struct bpf_prog *lhs, struct bpf_prog *rhs)
+{
+	bool ret;
+
+	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
+		return false;
+
+	down_read(&bpf_devs_lock);
+	ret = lhs->aux->offload && rhs->aux->offload &&
+	      lhs->aux->offload->netdev &&
+	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
 {
 	struct bpf_offloaded_map *offmap;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 11c558be4992..64a68e8fb072 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2605,6 +2605,12 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
 			goto free_prog_sec;
 	}
 
+	if (type == BPF_PROG_TYPE_EXT && dst_prog) {
+		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
+		if (err)
+			goto free_prog_sec;
+	}
+
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
 	if (err < 0)
@@ -3021,6 +3027,12 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 			goto out_put_prog;
 		}
 
+		if (bpf_prog_is_dev_bound(prog->aux) &&
+		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
+			err = -EINVAL;
+			goto out_put_prog;
+		}
+
 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
 	}
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 320451a0be3e..64f4d2b5824f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -16537,11 +16537,6 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 	if (tgt_prog) {
 		struct bpf_prog_aux *aux = tgt_prog->aux;
 
-		if (bpf_prog_is_dev_bound(tgt_prog->aux)) {
-			bpf_log(log, "Replacing device-bound programs not supported\n");
-			return -EINVAL;
-		}
-
 		for (i = 0; i < aux->func_info_cnt; i++)
 			if (aux->func_info[i].type_id == btf_id) {
 				subprog = i;
-- 
2.39.0.314.g84b9a713c41-goog