Hi Christian,

> On Aug 19, 2024, at 1:25 PM, Song Liu <songliubraving@xxxxxx> wrote:
> 
> Hi Christian,

[...]

>> If you provide a bpf_get_parent() api for userspace to consume you'll
>> end up providing them with an api that is extremely easy to misuse.
> 
> Does it make sense to have a higher level API that walks up the path,
> so that it takes mounts into account? It can probably be something like:
> 
> int bpf_get_parent_path(struct path *p) {
> again:
> 	if (p->dentry == p->mnt->mnt_root) {
> 		follow_up(p);
> 		goto again;
> 	}
> 	if (unlikely(IS_ROOT(p->dentry))) {
> 		return PARENT_WALK_DONE;
> 	}
> 	parent_dentry = dget_parent(p->dentry);
> 	dput(p->dentry);
> 	p->dentry = parent_dentry;
> 	return PARENT_WALK_NEXT;
> }
> 
> This will handle the mounts. However, we cannot guarantee deny-by-default
> policies like Landlock does, because this is just a building block of
> some security policies.

I guess the above is not really clear, so here is a prototype I put
together. With the kernel diff attached below, we are able to do something
like:

SEC("lsm.s/file_open")
int BPF_PROG(test_file_open, struct file *f)
{
	/* ... */
	bpf_for_each(dentry, dentry, &f->f_path, BPF_DENTRY_ITER_TO_ROOT) {
		ret = bpf_get_dentry_xattr(dentry, "user.kfunc", &value_ptr);
		/* do work with the xattr in value_ptr */
	}
	/* ... */
}

With this helper, the user cannot walk the tree arbitrarily. Instead, the
walk has to follow one of a few fixed patterns (TO_ROOT, TO_MNT_ROOT, etc.),
and the helper makes sure the walk is safe: references are held for the
whole walk and mount boundaries are crossed correctly. I put two rough
sketches after the diff, one showing roughly what the bpf_for_each() loop
expands to, and one showing a deny-by-default policy built on top of the
iterator.

Does this solution make sense to you?

Thanks,
Song

The kernel diff is below.

============================== 8< ===============================

diff --git c/fs/bpf_fs_kfuncs.c w/fs/bpf_fs_kfuncs.c
index 3fe9f59ef867..4b1400dec984 100644
--- c/fs/bpf_fs_kfuncs.c
+++ w/fs/bpf_fs_kfuncs.c
@@ -8,6 +8,7 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/mm.h>
+#include <linux/namei.h>
 #include <linux/xattr.h>
 
 __bpf_kfunc_start_defs();
@@ -154,13 +155,90 @@ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
 
 __bpf_kfunc_end_defs();
 
+struct bpf_iter_dentry {
+	__u64 __opaque[3];
+} __attribute__((aligned(8)));
+
+struct bpf_iter_dentry_kern {
+	struct path path;
+	unsigned int flags;
+} __attribute__((aligned(8)));
+
+enum {
+	/* all the parent paths, until root (/) */
+	BPF_DENTRY_ITER_TO_ROOT,
+	/* all the parent paths, until mnt root */
+	BPF_DENTRY_ITER_TO_MNT_ROOT,
+};
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_iter_dentry_new(struct bpf_iter_dentry *it,
+				    struct path *path, unsigned int flags)
+{
+	struct bpf_iter_dentry_kern *kit = (void *)it;
+
+	BUILD_BUG_ON(sizeof(struct bpf_iter_dentry_kern) >
+		     sizeof(struct bpf_iter_dentry));
+	BUILD_BUG_ON(__alignof__(struct bpf_iter_dentry_kern) !=
+		     __alignof__(struct bpf_iter_dentry));
+
+	switch (flags) {
+	case BPF_DENTRY_ITER_TO_ROOT:
+	case BPF_DENTRY_ITER_TO_MNT_ROOT:
+		break;
+	default:
+		return -EINVAL;
+	}
+	kit->path = *path;
+	path_get(&kit->path);
+	kit->flags = flags;
+	return 0;
+}
+
+__bpf_kfunc struct dentry *bpf_iter_dentry_next(struct bpf_iter_dentry *it)
+{
+	struct bpf_iter_dentry_kern *kit = (void *)it;
+	struct dentry *parent_dentry;
+
+	if (unlikely(IS_ROOT(kit->path.dentry)))
+		return NULL;
+
+jump_up:
+	if (kit->path.dentry == kit->path.mnt->mnt_root) {
+		if (kit->flags == BPF_DENTRY_ITER_TO_MNT_ROOT)
+			return NULL;
+		/* cross from the mount root into the parent mount */
+		if (follow_up(&kit->path)) {
+			goto jump_up;
+		}
+	}
+	/* step to the parent dentry, swapping the held reference */
+	parent_dentry = dget_parent(kit->path.dentry);
+	dput(kit->path.dentry);
+	kit->path.dentry = parent_dentry;
+	return parent_dentry;
+}
+
+__bpf_kfunc void bpf_iter_dentry_destroy(struct bpf_iter_dentry *it)
+{
+	struct bpf_iter_dentry_kern *kit = (void *)it;
+
+	path_put(&kit->path);
+}
+
+__bpf_kfunc_end_defs();
+
 BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
 BTF_ID_FLAGS(func, bpf_get_task_exe_file,
 	     KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE) /* Will fix this later */
 BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_dentry_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_dentry_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_dentry_destroy, KF_ITER_DESTROY)
 BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
 
 static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
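
Sketch 1: the bpf_for_each(dentry, ...) loop above is just the open-coded
iterator macro from the selftests' bpf_experimental.h, so it is roughly
equivalent to driving the three new kfuncs by hand. A minimal sketch of
that expansion, with error handling and the surrounding program elided:

	struct bpf_iter_dentry it;
	struct dentry *dentry;

	/* visit every ancestor of f, crossing mount boundaries, up to "/" */
	bpf_iter_dentry_new(&it, &f->f_path, BPF_DENTRY_ITER_TO_ROOT);
	while ((dentry = bpf_iter_dentry_next(&it))) {
		/* same loop body as in the example above */
	}
	bpf_iter_dentry_destroy(&it);

The verifier enforces the new/next/destroy pairing on the program side,
which is part of why the walk cannot be driven in an unexpected order.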
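
Sketch 2: to make the deny-by-default point concrete, here is a rough
sketch of a Landlock-style policy built on the iterator. It is only a
sketch: the program name and the "user.allow_open" xattr are made up for
illustration, the buffer assumes a small marker value, and the includes
and extern declaration are just what I expect such a program to need.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_experimental.h"	/* bpf_for_each() */

char _license[] SEC("license") = "GPL";

extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
				struct bpf_dynptr *value_p) __ksym;

SEC("lsm.s/file_open")
int BPF_PROG(allow_marked_subtrees, struct file *f)
{
	struct bpf_dynptr value_ptr;
	struct dentry *dentry;
	char buf[8];

	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value_ptr);

	/*
	 * Deny by default: allow the open only if one of the file's
	 * ancestor directories, walking across mount boundaries up to
	 * "/", carries the (made-up) marker xattr.
	 */
	bpf_for_each(dentry, dentry, &f->f_path, BPF_DENTRY_ITER_TO_ROOT) {
		if (bpf_get_dentry_xattr(dentry, "user.allow_open",
					 &value_ptr) >= 0)
			return 0;
	}
	return -EPERM;
}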