[PATCH v1 bpf-next 5/9] [RFC] selftests/bpf: Add unsafe lock/unlock and refcount_read kfuncs to bpf_testmod

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



[
RFC: This patch currently copies static inline helpers:

  __bpf_spin_lock
  __bpf_spin_unlock
  __bpf_spin_lock_irqsave
  __bpf_spin_unlock_irqrestore

from kernel/bpf/helpers.c. The definition of these helpers is
config-dependent and they're not meant to be called from a module, so
I'm not sure how to proceed here.
]

This patch adds three unsafe kfuncs to bpf_testmod for use in
selftests:

  - bpf__unsafe_spin_lock
  - bpf__unsafe_spin_unlock
  - bpf_refcount_read

The first two are equivalent to bpf_spin_{lock,unlock}, except without
any special treatment from the verifier, which allows them to be used in
tests to guarantee a specific interleaving of program execution. This
will simplify testing race conditions in BPF programs, as demonstrated
in further patches in the series. The kfuncs are marked KF_DESTRUCTIVE
as they can easily cause deadlock, and are only intended to be used in
tests.

bpf_refcount_read simply reads the refcount from the uapi-opaque
bpf_refcount struct and returns it. This allows more precise testing of
specific bpf_refcount scenarios, also demonstrated in further patches in
the series. Although this kfunc can't break the system as
catastrophically as the unsafe locking kfuncs, it's also marked
KF_DESTRUCTIVE as it relies on bpf_refcount implementation details, and
shouldn't be used outside of tests regardless.

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
 .../selftests/bpf/bpf_testmod/bpf_testmod.c   | 61 +++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 52785ba671e6..30762558b16f 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -108,6 +108,64 @@ __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
 	it->cnt = 0;
 }
 
+/* BEGIN copied from kernel/bpf/helpers.c */
+
+/* IRQ flags saved by __bpf_spin_lock_irqsave(), restored by
+ * __bpf_spin_unlock_irqrestore(). Per-CPU, so lock and unlock must
+ * happen on the same CPU with IRQs disabled in between.
+ */
+static DEFINE_PER_CPU(unsigned long, irqsave_flags);
+
+/* Take @lock by reinterpreting the uapi-opaque bpf_spin_lock as a raw
+ * arch_spinlock_t. Valid only because both are 4 bytes and the all-zero
+ * bit pattern means "unlocked" — both facts are asserted below.
+ */
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+	arch_spinlock_t *l = (void *)lock;
+	union {
+		__u32 val;
+		arch_spinlock_t lock;
+	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
+
+	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
+	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
+	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+	arch_spin_lock(l);
+}
+
+/* Release @lock previously taken by __bpf_spin_lock(). Same
+ * reinterpretation of bpf_spin_lock as arch_spinlock_t applies.
+ */
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+	arch_spinlock_t *l = (void *)lock;
+
+	arch_spin_unlock(l);
+}
+
+/* Disable local IRQs, take @lock, and stash the saved IRQ flags in the
+ * per-CPU irqsave_flags slot for the matching unlock_irqrestore. Writing
+ * the flags only after the lock is held keeps the slot from being
+ * clobbered while another lock section on this CPU is still active.
+ */
+static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__bpf_spin_lock(lock);
+	__this_cpu_write(irqsave_flags, flags);
+}
+
+/* Release @lock and restore the IRQ flags saved by the paired
+ * __bpf_spin_lock_irqsave(). Flags are read before unlocking so the
+ * per-CPU slot cannot be overwritten by a new lock section in between.
+ */
+static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
+{
+	unsigned long flags;
+
+	flags = __this_cpu_read(irqsave_flags);
+	__bpf_spin_unlock(lock);
+	local_irq_restore(flags);
+}
+/* END copied from kernel/bpf/helpers.c */
+
+/* Test-only kfunc: take an arbitrary 4-byte region as a spin lock with no
+ * verifier checking whatsoever. Despite the name, this is the irqsave
+ * variant, so IRQs stay disabled until bpf__unsafe_spin_unlock(). The
+ * __ign suffix makes the verifier ignore the argument entirely — passing
+ * anything other than a valid, unlocked bpf_spin_lock can deadlock the
+ * system, hence KF_DESTRUCTIVE.
+ */
+__bpf_kfunc void bpf__unsafe_spin_lock(void *lock__ign)
+{
+	__bpf_spin_lock_irqsave((struct bpf_spin_lock *)lock__ign);
+}
+
+/* Test-only kfunc: release a lock taken by bpf__unsafe_spin_lock() and
+ * restore IRQ flags. Must run on the same CPU as the lock call (the saved
+ * flags live in a per-CPU variable). Completely unchecked by the verifier.
+ */
+__bpf_kfunc void bpf__unsafe_spin_unlock(void *lock__ign)
+{
+	__bpf_spin_unlock_irqrestore((struct bpf_spin_lock *)lock__ign);
+}
+
+/* Test-only kfunc: read the current value of a uapi-opaque bpf_refcount.
+ * Assumes bpf_refcount is layout-compatible with refcount_t — an
+ * implementation detail that may change, which is why this is
+ * KF_DESTRUCTIVE despite being read-only. NOTE(review): refcount_read()
+ * returns unsigned int; the int return narrows the type — confirm no
+ * caller cares about values above INT_MAX.
+ */
+__bpf_kfunc int bpf_refcount_read(void *refcount__ign)
+{
+	return refcount_read((refcount_t *)refcount__ign);
+}
+
 struct bpf_testmod_btf_type_tag_1 {
 	int a;
 };
 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
+/* Test-only kfuncs: marked KF_DESTRUCTIVE so only privileged, explicitly
+ * destructive-enabled programs can call them (they can deadlock the box).
+ */
+BTF_ID_FLAGS(func, bpf__unsafe_spin_lock, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf__unsafe_spin_unlock, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf_refcount_read, KF_DESTRUCTIVE)
 BTF_SET8_END(bpf_testmod_common_kfunc_ids)
 
 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
-- 
2.34.1





[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux