Re: [PATCH bpf-next v1 14/22] rqspinlock: Add macros for rqspinlock usage

On 1/7/25 8:59 AM, Kumar Kartikeya Dwivedi wrote:
Introduce helper macros that wrap around the rqspinlock slow path and
provide an interface analogous to the raw_spin_lock API. Note that
in case of error conditions, preemption and IRQ disabling are
automatically undone before the error is returned to the caller.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
  include/asm-generic/rqspinlock.h | 58 ++++++++++++++++++++++++++++++++
  1 file changed, 58 insertions(+)

diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index dc436ab01471..53be8426373c 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -12,8 +12,10 @@
  #include <linux/types.h>
  #include <vdso/time64.h>
  #include <linux/percpu.h>
+#include <asm/qspinlock.h>
  
  struct qspinlock;
+typedef struct qspinlock rqspinlock_t;
  
  extern int resilient_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val, u64 timeout);
  
@@ -82,4 +84,60 @@ static __always_inline void release_held_lock_entry(void)
  	this_cpu_dec(rqspinlock_held_locks.cnt);
  }
+/**
+ * res_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline int res_spin_lock(rqspinlock_t *lock)
+{
+	int val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) {
+		grab_held_lock_entry(lock);
+		return 0;
+	}
+	return resilient_queued_spin_lock_slowpath(lock, val, RES_DEF_TIMEOUT);
+}
+
+static __always_inline void res_spin_unlock(rqspinlock_t *lock)
+{
+	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+	if (unlikely(rqh->cnt > RES_NR_HELD))
+		goto unlock;
+	WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+	/*
+	 * Release barrier, ensuring ordering. See release_held_lock_entry.
+	 */
+unlock:
+	queued_spin_unlock(lock);
+	this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (struct qspinlock)__ARCH_SPIN_LOCK_UNLOCKED; })
+
+#define raw_res_spin_lock(lock)                    \
+	({                                         \
+		int __ret;                         \
+		preempt_disable();                 \
+		__ret = res_spin_lock(lock);	   \
+		if (__ret)                         \
+			preempt_enable();          \
+		__ret;                             \
+	})
+
+#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
+
+#define raw_res_spin_lock_irqsave(lock, flags)    \
+	({                                        \
+		int __ret;                        \
+		local_irq_save(flags);            \
+		__ret = raw_res_spin_lock(lock);  \
+		if (__ret)                        \
+			local_irq_restore(flags); \
+		__ret;                            \
+	})
+
+#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
+
  #endif /* __ASM_GENERIC_RQSPINLOCK_H */
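
For context, a minimal usage sketch of the API proposed above might look like the following. The struct and function names here are hypothetical and only meant to show the calling convention; the key point is that, unlike raw_spin_lock(), acquisition can fail (e.g. on timeout or deadlock detection), so the return value must be checked, and the macros have already restored IRQ/preemption state on the error path:

static int example_update(struct example_obj *obj, int new_value)
{
	unsigned long flags;
	int ret;

	/* obj->lock is an rqspinlock_t, set up earlier with
	 * raw_res_spin_lock_init(&obj->lock).
	 */
	ret = raw_res_spin_lock_irqsave(&obj->lock, flags);
	if (ret)
		return ret;	/* IRQs and preemption already restored */

	obj->value = new_value;

	raw_res_spin_unlock_irqrestore(&obj->lock, flags);
	return 0;
}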

Lockdep calls aren't included in the helper functions. That means all the *res_spin_lock*() calls will be outside the purview of lockdep. That also means a multi-CPU circular locking dependency involving a mixture of qspinlocks and rqspinlocks may not be detectable.
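
For reference, adding lockdep coverage would require something along the lines of the sketch below, modeled on __raw_spin_lock(). This assumes a dep_map member is added to the lock (rqspinlock_t has none in this patch), and it only illustrates the shape of the change, not a complete implementation:

static __always_inline int res_spin_lock_lockdep(rqspinlock_t *lock)
{
	int ret;

	preempt_disable();
	/* Record the acquisition so lockdep can build the dependency graph.
	 * Assumes lock->dep_map exists, which is not the case today.
	 */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);

	ret = res_spin_lock(lock);
	if (ret) {
		/* Acquisition failed (timeout/deadlock): drop the annotation. */
		spin_release(&lock->dep_map, _RET_IP_);
		preempt_enable();
	}
	return ret;
}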

Cheers,
Longman




