Re: [RFC PATCH bpf-next 06/11] bpf: Add bpf_rbtree_{lock,unlock} helpers

On 7/22/22 11:34 AM, Dave Marchevsky wrote:
These helpers are equivalent to bpf_spin_{lock,unlock}, but the verifier
doesn't try to enforce that no helper calls occur when there's an active
spin lock.

[ TODO: Currently the verifier doesn't do _anything_ spinlock related
when it sees one of these, including setting active_spin_lock. This is
probably too lenient. Also, EXPORT_SYMBOL for internal lock helpers
might not be the best code structure. ]

Future patches will add enforcement of the "rbtree helpers must always be
called while the lock is held" constraint.
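
To make the intended flow concrete, here is a minimal usage sketch from my
side (not from the patch): the map type name, section name and program body
are hypothetical, only bpf_rbtree_get_lock/lock/unlock come from this series,
and their exact prototypes are whatever the series defines.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RBTREE);	/* hypothetical map type name */
} rbtree_map SEC(".maps");

SEC("tc")
int rbtree_op(struct __sk_buff *ctx)
{
	void *lock;

	lock = bpf_rbtree_get_lock(&rbtree_map);

	bpf_rbtree_lock(lock);
	/* rbtree add/find/remove helper calls would go here, while the
	 * lock is held; the verifier doesn't reject them because
	 * bpf_rbtree_lock has no special meaning to it, unlike
	 * bpf_spin_lock
	 */
	bpf_rbtree_unlock(lock);

	return 0;
}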

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
  include/uapi/linux/bpf.h       | 20 ++++++++++++++++++++
  kernel/bpf/helpers.c           | 12 ++++++++++--
  kernel/bpf/rbtree.c            | 29 +++++++++++++++++++++++++++++
  kernel/bpf/verifier.c          |  2 ++
  tools/include/uapi/linux/bpf.h | 20 ++++++++++++++++++++
  5 files changed, 81 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c677d92de3bc..d21e2c99ea14 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5391,6 +5391,24 @@ union bpf_attr {
   *
   *	Return
   *		Ptr to lock
+ *
+ * void *bpf_rbtree_lock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_lock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
+ *
+ * void *bpf_rbtree_unlock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_unlock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
   */
  #define __BPF_FUNC_MAPPER(FN)		\
  	FN(unspec),			\
@@ -5607,6 +5625,8 @@ union bpf_attr {
  	FN(rbtree_remove),		\
  	FN(rbtree_free_node),		\
  	FN(rbtree_get_lock),		\
+	FN(rbtree_lock),		\
+	FN(rbtree_unlock),		\
  	/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 257a808bb767..fa2dba1dcec8 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -303,7 +303,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
  static DEFINE_PER_CPU(unsigned long, irqsave_flags);

-static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
+inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
  {
  	unsigned long flags;
@@ -311,6 +311,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
  	__bpf_spin_lock(lock);
  	__this_cpu_write(irqsave_flags, flags);
  }
+EXPORT_SYMBOL(__bpf_spin_lock_irqsave);

What is it for?
It's not used from modules.

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
  {
@@ -325,7 +326,7 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
  	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
  };
-static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
+inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
  {
  	unsigned long flags;
@@ -333,6 +334,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
  	__bpf_spin_unlock(lock);
  	local_irq_restore(flags);
  }
+EXPORT_SYMBOL(__bpf_spin_unlock_irqrestore);
notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
  {
@@ -1588,6 +1590,8 @@ const struct bpf_func_proto bpf_rbtree_find_proto __weak;
  const struct bpf_func_proto bpf_rbtree_remove_proto __weak;
  const struct bpf_func_proto bpf_rbtree_free_node_proto __weak;
  const struct bpf_func_proto bpf_rbtree_get_lock_proto __weak;
+const struct bpf_func_proto bpf_rbtree_lock_proto __weak;
+const struct bpf_func_proto bpf_rbtree_unlock_proto __weak;
const struct bpf_func_proto *
  bpf_base_func_proto(enum bpf_func_id func_id)
@@ -1689,6 +1693,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
  		return &bpf_rbtree_free_node_proto;
  	case BPF_FUNC_rbtree_get_lock:
  		return &bpf_rbtree_get_lock_proto;
+	case BPF_FUNC_rbtree_lock:
+		return &bpf_rbtree_lock_proto;
+	case BPF_FUNC_rbtree_unlock:
+		return &bpf_rbtree_unlock_proto;
  	default:
  		break;
  	}
diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
index c6f0a2a083f6..bf2e30af82ec 100644
--- a/kernel/bpf/rbtree.c
+++ b/kernel/bpf/rbtree.c
@@ -262,6 +262,35 @@ const struct bpf_func_proto bpf_rbtree_get_lock_proto = {
  	.arg1_type = ARG_CONST_MAP_PTR,
  };
+extern void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);
+extern void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
+
+BPF_CALL_1(bpf_rbtree_lock, void *, lock)
+{
+	__bpf_spin_lock_irqsave((struct bpf_spin_lock *)lock);
+	return 0;
+}

It doesn't have to be bpf_spin_lock.
A normal spin_lock will do.
bpf_spin_lock has a specific size requirement so that, when it's used inside
a map value, the value size doesn't change from kernel to kernel.
Since this lock is hidden from the program it can be any lock.
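
I.e. something along these lines -- a sketch only, the struct name and
layout are my guesses and not from this patch:

#include <linux/bpf.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

/* The lock is internal to the kernel-side rbtree and never lives in a
 * map value, so it doesn't need bpf_spin_lock's fixed-size ABI -- any
 * in-kernel lock type works.  irqsave handling omitted for brevity.
 */
struct bpf_rbtree {
	struct bpf_map map;
	struct rb_root_cached root;
	raw_spinlock_t lock;
};

static void rbtree_lock(struct bpf_rbtree *tree)
{
	raw_spin_lock(&tree->lock);
}

static void rbtree_unlock(struct bpf_rbtree *tree)
{
	raw_spin_unlock(&tree->lock);
}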

Also, it needs to remember the current task or something similar.
Just checking spin_is_locked() from bpf_rbtree_add() is not enough.
Instead of remembering current we could pass a hidden 'prog' pointer
and remember that in bpf_rbtree_lock, then pass that hidden prog ptr
to add/remove/find and compare.
But that's probably overkill; remembering the current task should be fine.
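
Building on the sketch above (same includes, plus linux/sched.h for
current), the owner tracking could look like this -- again just a sketch,
the field and function names are made up:

/* struct bpf_rbtree as in the previous sketch, plus an owner field */
struct bpf_rbtree {
	struct bpf_map map;
	struct rb_root_cached root;
	raw_spinlock_t lock;
	struct task_struct *lock_owner;
};

static void rbtree_lock(struct bpf_rbtree *tree)
{
	raw_spin_lock(&tree->lock);
	WRITE_ONCE(tree->lock_owner, current);
}

static void rbtree_unlock(struct bpf_rbtree *tree)
{
	WRITE_ONCE(tree->lock_owner, NULL);
	raw_spin_unlock(&tree->lock);
}

/* what bpf_rbtree_add() and friends would check instead of a bare
 * spin_is_locked(): the tree must be locked by the calling task
 */
static bool rbtree_locked_by_us(struct bpf_rbtree *tree)
{
	return READ_ONCE(tree->lock_owner) == current;
}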



