On Sun, Apr 21, 2024 at 07:43:36PM -0400, Siddharth Chintamaneni wrote:
> This patch is to prevent deadlocks when multiple bpf
> programs are attached to queued_spin_locks functions. This issue is similar
> to what is already discussed[1] before with the spin_lock helpers.
>
> The addition of notrace macro to the queued_spin_locks
> has been discussed[2] when bpf_spin_locks are introduced.
>
> [1] https://lore.kernel.org/bpf/CAE5sdEigPnoGrzN8WU7Tx-h-iFuMZgW06qp0KHWtpvoXxf1OAQ@xxxxxxxxxxxxxx/#r
> [2] https://lore.kernel.org/all/20190117011629.efxp7abj4bpf5yco@ast-mbp/t/#maf05c4d71f935f3123013b7ed410e4f50e9da82c
>
> Fixes: d83525ca62cf ("bpf: introduce bpf_spin_lock")
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@xxxxxx>
> ---
>  kernel/locking/qspinlock.c                        |  2 +-
>  .../bpf/prog_tests/tracing_failure.c              | 24 +++++++++++++++++++
>  .../selftests/bpf/progs/tracing_failure.c         |  6 +++++
>  3 files changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index ebe6b8ec7cb3..4d46538d8399 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -313,7 +313,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
>   * contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
>   *   queue   :         ^--'                          :
>   */
> -void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> +notrace void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)

we did the same for bpf spin lock helpers, which is fine,
but I wonder removing queued_spin_lock_slowpath from traceable
functions could break some scripts (even though many probably
use contention tracepoints..)

maybe we could have a list of helpers/kfuncs that could call
spin lock and deny bpf program to load/attach to
queued_spin_lock_slowpath if it calls anything from that list

>  {
>  	struct mcs_spinlock *prev, *next, *node;
>  	u32 old, tail;
> diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
> index a222df765bc3..822ee6c559bc 100644
> --- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
> +++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
> @@ -28,10 +28,34 @@ static void test_bpf_spin_lock(bool is_spin_lock)
>  	tracing_failure__destroy(skel);
>  }
>
> +static void test_queued_spin_lock(void)
> +{
> +	struct tracing_failure *skel;
> +	int err;
> +
> +	skel = tracing_failure__open();
> +	if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
> +		return;
> +
> +	bpf_program__set_autoload(skel->progs.test_queued_spin_lock, true);
> +
> +	err = tracing_failure__load(skel);
> +	if (!ASSERT_OK(err, "tracing_failure__load"))
> +		goto out;
> +
> +	err = tracing_failure__attach(skel);
> +	ASSERT_ERR(err, "tracing_failure__attach");

the test is broken, fentry program won't load with notrace function

[root@qemu bpf]# ./test_progs -n 391/3
test_queued_spin_lock:PASS:tracing_failure__open 0 nsec
libbpf: prog 'test_queued_spin_lock': failed to find kernel BTF type ID of 'queued_spin_lock_slowpath': -3
libbpf: prog 'test_queued_spin_lock': failed to prepare load attributes: -3
libbpf: prog 'test_queued_spin_lock': failed to load: -3
libbpf: failed to load object 'tracing_failure'
libbpf: failed to load BPF skeleton 'tracing_failure': -3
test_queued_spin_lock:FAIL:tracing_failure__load unexpected error: -3 (errno 3)
#391/3   tracing_failure/queued_spin_lock_slowpath:FAIL
#391     tracing_failure:FAIL

jirka

> +
> +out:
> +	tracing_failure__destroy(skel);
> +}
> +
>  void test_tracing_failure(void)
>  {
>  	if (test__start_subtest("bpf_spin_lock"))
>  		test_bpf_spin_lock(true);
>  	if (test__start_subtest("bpf_spin_unlock"))
>  		test_bpf_spin_lock(false);
> +	if (test__start_subtest("queued_spin_lock_slowpath"))
> +		test_queued_spin_lock();
>  }
> diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
> index d41665d2ec8c..2d2e7fc9d4f0 100644
> --- a/tools/testing/selftests/bpf/progs/tracing_failure.c
> +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
> @@ -18,3 +18,9 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
>  {
>  	return 0;
>  }
> +
> +SEC("?fentry/queued_spin_lock_slowpath")
> +int BPF_PROG(test_queued_spin_lock, struct qspinlock *lock, u32 val)
> +{
> +	return 0;
> +}
> --
> 2.43.0
>
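
To make the deny-list suggestion in the review above a bit more concrete, here is a rough,
purely illustrative sketch. It is not actual kernel or verifier code: the list contents,
the string-based matching, and the names spinlock_callers[] and
may_attach_to_qspinlock_slowpath() are hypothetical stand-ins; a real check would live in
the fentry load/attach path and compare BTF IDs rather than names.

/* Illustrative only, not kernel code: hypothetical deny list of
 * helpers/kfuncs that may take a queued spin lock internally.
 */
#include <stdbool.h>
#include <string.h>

static const char * const spinlock_callers[] = {
	"bpf_spin_lock",
	"bpf_spin_unlock",
	/* ... anything else known to end up in queued_spin_lock_slowpath ... */
};

/* Hypothetical policy: refuse to attach an fentry program to
 * queued_spin_lock_slowpath if it calls anything from the deny list.
 */
static bool may_attach_to_qspinlock_slowpath(const char * const *calls,
					     unsigned int nr_calls)
{
	for (unsigned int i = 0; i < nr_calls; i++)
		for (unsigned int j = 0;
		     j < sizeof(spinlock_callers) / sizeof(spinlock_callers[0]); j++)
			if (!strcmp(calls[i], spinlock_callers[j]))
				return false;
	return true;
}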
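
As for the selftest, if the intent is simply to verify that a notrace
queued_spin_lock_slowpath can no longer be targeted by fentry, one option is to expect
the failure at load time rather than at attach time, since libbpf cannot even resolve the
kernel BTF type ID (see the test_progs log above). An untested sketch, reusing the
tracing_failure skeleton from the patch:

static void test_queued_spin_lock(void)
{
	struct tracing_failure *skel;
	int err;

	skel = tracing_failure__open();
	if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_queued_spin_lock, true);

	/* fentry on the notrace function has no kernel BTF ID to resolve,
	 * so loading the skeleton is expected to fail already here
	 * ("failed to find kernel BTF type ID" in the log above),
	 * not at attach time.
	 */
	err = tracing_failure__load(skel);
	ASSERT_ERR(err, "tracing_failure__load");

	tracing_failure__destroy(skel);
}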