[PATCH bpf-next] Add notrace to queued_spin_lock_slowpath function to avoid deadlocks

This patch prevents deadlocks that can occur when multiple bpf
programs are attached to the queued spinlock functions. The issue is
similar to the one already discussed [1] for the bpf_spin_lock helpers.

Adding the notrace macro to the queued spinlock functions was also
discussed [2] when bpf_spin_lock was introduced.
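
To illustrate the scenario (a hypothetical reproducer sketch, not part
of this patch; the program and map names are made up), a tracing
program attached to queued_spin_lock_slowpath that updates a hash map
takes the map's bucket lock internally; if that lock is contended, the
update re-enters queued_spin_lock_slowpath, which fires the tracing
programs again:

  /* Hypothetical sketch: fentry program on queued_spin_lock_slowpath
   * whose own map update can end up back in the slowpath.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  struct {
  	__uint(type, BPF_MAP_TYPE_HASH);
  	__uint(max_entries, 1);
  	__type(key, u32);
  	__type(value, u64);
  } counts SEC(".maps");

  SEC("fentry/queued_spin_lock_slowpath")
  int BPF_PROG(on_slowpath, struct qspinlock *lock, u32 val)
  {
  	u32 key = 0;
  	u64 one = 1;

  	/* bpf_map_update_elem() takes the bucket spinlock internally;
  	 * under contention it re-enters queued_spin_lock_slowpath,
  	 * re-triggering attached programs -> recursion/deadlock.
  	 */
  	bpf_map_update_elem(&counts, &key, &one, BPF_ANY);
  	return 0;
  }

  char LICENSE[] SEC("license") = "GPL";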

[1] https://lore.kernel.org/bpf/CAE5sdEigPnoGrzN8WU7Tx-h-iFuMZgW06qp0KHWtpvoXxf1OAQ@xxxxxxxxxxxxxx/#r
[2] https://lore.kernel.org/all/20190117011629.efxp7abj4bpf5yco@ast-mbp/t/#maf05c4d71f935f3123013b7ed410e4f50e9da82c

Fixes: d83525ca62cf ("bpf: introduce bpf_spin_lock")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@xxxxxx>
---
 kernel/locking/qspinlock.c                    |  2 +-
 .../bpf/prog_tests/tracing_failure.c          | 24 +++++++++++++++++++
 .../selftests/bpf/progs/tracing_failure.c     |  6 +++++
 3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ebe6b8ec7cb3..4d46538d8399 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -313,7 +313,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
  * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
  *   queue               :         ^--'                             :
  */
-void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+notrace void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	struct mcs_spinlock *prev, *next, *node;
 	u32 old, tail;
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
index a222df765bc3..822ee6c559bc 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -28,10 +28,34 @@ static void test_bpf_spin_lock(bool is_spin_lock)
 	tracing_failure__destroy(skel);
 }

+static void test_queued_spin_lock(void)
+{
+	struct tracing_failure *skel;
+	int err;
+
+	skel = tracing_failure__open();
+	if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.test_queued_spin_lock, true);
+
+	err = tracing_failure__load(skel);
+	if (!ASSERT_OK(err, "tracing_failure__load"))
+		goto out;
+
+	err = tracing_failure__attach(skel);
+	ASSERT_ERR(err, "tracing_failure__attach");
+
+out:
+	tracing_failure__destroy(skel);
+}
+
 void test_tracing_failure(void)
 {
  if (test__start_subtest("bpf_spin_lock"))
  test_bpf_spin_lock(true);
  if (test__start_subtest("bpf_spin_unlock"))
  test_bpf_spin_lock(false);
+ if (test__start_subtest("queued_spin_lock_slowpath"))
+ test_queued_spin_lock();
 }
diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
index d41665d2ec8c..2d2e7fc9d4f0 100644
--- a/tools/testing/selftests/bpf/progs/tracing_failure.c
+++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
@@ -18,3 +18,9 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
 {
 	return 0;
 }
+
+SEC("?fentry/queued_spin_lock_slowpath")
+int BPF_PROG(test_queued_spin_lock, struct qspinlock *lock, u32 val)
+{
+	return 0;
+}
--
2.43.0
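
For reference (not part of the patch itself), the new subtest can be
exercised with the BPF selftests runner:

  cd tools/testing/selftests/bpf
  make
  sudo ./test_progs -t tracing_failure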



