bpf_get_slab_cache() returns slab cache information for a virtual address, like virt_to_cache(). If the address points to a slab object, it returns a valid kmem_cache pointer; otherwise NULL is returned. It does not take a reference count on the kmem_cache, so the caller is responsible for managing access to it. The intended use case for now is to symbolize locks in slab objects from the lock contention tracepoints.