On 12/20/22 2:20 PM, Stanislav Fomichev wrote:
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
struct bpf_offload_netdev *ondev;
struct bpf_prog_offload *offload;
@@ -199,7 +197,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
attr->prog_type != BPF_PROG_TYPE_XDP)
return -EINVAL;
- if (attr->prog_flags)
+ if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
return -EINVAL;
offload = kzalloc(sizeof(*offload), GFP_USER);
@@ -214,11 +212,23 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
if (err)
goto err_maybe_put;
+ prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);
Just noticed bpf_prog_dev_bound_init() also accepts BPF_PROG_TYPE_SCHED_CLS. Not sure
if there is a device match check when attaching a BPF_PROG_TYPE_SCHED_CLS program. If not,
does it make sense to reject a dev-bound-only BPF_PROG_TYPE_SCHED_CLS here?
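
If rejecting it makes sense, maybe something along these lines next to the existing
prog_type/prog_flags checks above (untested, just to illustrate the idea):

        /* Hypothetical: the dev-bound-only flag is only meaningful for XDP,
         * so reject it for any other program type.
         */
        if (attr->prog_type != BPF_PROG_TYPE_XDP &&
            (attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
                return -EINVAL;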
+
down_write(&bpf_devs_lock);
ondev = bpf_offload_find_netdev(offload->netdev);
if (!ondev) {
- err = -EINVAL;
- goto err_unlock;
+ if (bpf_prog_is_offloaded(prog->aux)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* When only binding to the device, explicitly
+ * create an entry in the hashtable.
+ */
+ err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
+ if (err)
+ goto err_unlock;
+ ondev = bpf_offload_find_netdev(offload->netdev);
}
offload->offdev = ondev->offdev;
prog->aux->offload = offload;
@@ -321,12 +331,41 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
up_read(&bpf_devs_lock);
}
-void bpf_prog_offload_destroy(struct bpf_prog *prog)
+static void __bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
+{
+ struct bpf_prog_offload *offload = prog->aux->offload;
+
+ if (offload->dev_state)
+ offload->offdev->ops->destroy(prog);
+
+ /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+ bpf_prog_free_id(prog, true);
+
+ kfree(offload);
+ prog->aux->offload = NULL;
+}
+
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
+ struct bpf_offload_netdev *ondev;
+ struct net_device *netdev;
+
+ rtnl_lock();
down_write(&bpf_devs_lock);
- if (prog->aux->offload)
- __bpf_prog_offload_destroy(prog);
+ if (prog->aux->offload) {
+ list_del_init(&prog->aux->offload->offloads);
+
+ netdev = prog->aux->offload->netdev;
After saving the netdev, would it work to call __bpf_prog_offload_destroy() here
instead of creating an almost identical __bpf_prog_dev_bound_destroy()? The idea is
to call list_del_init() first; __bpf_offload_dev_netdev_unregister() then does not
need the "offload" to still be around. (Rough sketch at the end, after the quoted hunk.)
+ if (netdev) {
I am thinking offload->netdev cannot be NULL here. Did I overlook a place that resets
offload->netdev back to NULL? e.g. bpf_prog_offload_info_fill_ns() does not check
offload->netdev.
+ ondev = bpf_offload_find_netdev(netdev);
and ondev should not be NULL either?
I am trying to confirm my understanding that all offload->netdev and ondev accesses
are protected by bpf_devs_lock.
+ if (ondev && !ondev->offdev && list_empty(&ondev->progs))
+ __bpf_offload_dev_netdev_unregister(NULL, netdev);
+ }
+
+ __bpf_prog_dev_bound_destroy(prog);
+ }
up_write(&bpf_devs_lock);
+ rtnl_unlock();
}
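
Putting the above together, would something like this work? Untested sketch only; it
assumes __bpf_prog_offload_destroy() keeps doing the list_del_init()/kfree() itself
(a second list_del_init() on the already re-initialised entry should be harmless), and
it keeps the NULL checks from the patch for now, pending the questions above:

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
        struct bpf_offload_netdev *ondev;
        struct net_device *netdev;

        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (prog->aux->offload) {
                list_del_init(&prog->aux->offload->offloads);

                /* Save netdev before __bpf_prog_offload_destroy() frees
                 * prog->aux->offload.
                 */
                netdev = prog->aux->offload->netdev;
                __bpf_prog_offload_destroy(prog);

                /* Drop the implicitly created hashtable entry once the
                 * last dev-bound prog for this netdev is gone.
                 */
                if (netdev) {
                        ondev = bpf_offload_find_netdev(netdev);
                        if (ondev && !ondev->offdev && list_empty(&ondev->progs))
                                __bpf_offload_dev_netdev_unregister(NULL, netdev);
                }
        }
        up_write(&bpf_devs_lock);
        rtnl_unlock();
}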