From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

In some cases it's desirable to lock the seqlock w/o changing the
seqcount. Provide functions for this, so we can avoid open-coded
constructs.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: stable-rt@xxxxxxxxxxxxxxx
Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
---
 include/linux/seqlock.h |   64 +++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 64 insertions(+), 0 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 3e1f3f9..b44048d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -188,6 +188,19 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 }
 
 /*
+ * Ditto w/o barriers
+ */
+static inline unsigned __read_seqbegin(const seqlock_t *sl)
+{
+	return __read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned __read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return __read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
  * Lock out other writers and update the count.
  * Acts like a normal spin_lock/unlock.
  * Don't need preempt_disable() because that is in the spin_lock already.
@@ -247,4 +260,55 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
+/*
+ * Instead of open coding a spinlock and a seqcount, the following
+ * functions allow serializing on the seqlock w/o touching the seqcount.
+ */
+static inline void seq_spin_lock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+}
+
+static inline int seq_spin_trylock(seqlock_t *sl)
+{
+	return spin_trylock(&sl->lock);
+}
+
+static inline void seq_spin_unlock(seqlock_t *sl)
+{
+	spin_unlock(&sl->lock);
+}
+
+static inline void assert_seq_spin_locked(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+}
+
+static inline void seq_spin_lock_nested(seqlock_t *sl, int subclass)
+{
+	spin_lock_nested(&sl->lock, subclass);
+}
+
+/*
+ * For writers which need to take/release the lock w/o updating the
+ * seqcount for whatever reason, the following functions allow updating
+ * the count after the lock has been acquired or before it is released.
+ */
+static inline void write_seqlock_begin(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_seqlock_end(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+	write_seqcount_end(&sl->seqcount);
+}
+
+static inline void write_seqlock_barrier(seqlock_t *sl)
+{
+	write_seqcount_barrier(&sl->seqcount);
+}
+
 #endif /* __LINUX_SEQLOCK_H */
-- 
1.7.8.3
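
As a usage illustration only (not part of the patch): "struct foo" and
the two helpers below are hypothetical; the only real API calls are the
seq_spin_*() and write_seqlock_begin/end() functions added above, plus
the existing read_seqbegin()/read_seqretry(). The writer takes the lock
early, does preparatory work that leaves reader-visible state intact,
and bumps the seqcount only around the actual update:

struct foo {
	seqlock_t	lock;
	int		a, b;
};

/* Writer: serialize early, bump the seqcount only around the change. */
static void foo_update(struct foo *f, int a, int b)
{
	/* Lock out other writers w/o touching the seqcount. */
	seq_spin_lock(&f->lock);

	/*
	 * Preparatory work which does not modify reader-visible
	 * state can happen here w/o forcing readers to retry.
	 */

	write_seqlock_begin(&f->lock);
	f->a = a;
	f->b = b;
	write_seqlock_end(&f->lock);

	seq_spin_unlock(&f->lock);
}

/* Reader: the usual retry loop against the embedded seqcount. */
static int foo_read_a(struct foo *f)
{
	unsigned seq;
	int a;

	do {
		seq = read_seqbegin(&f->lock);
		a = f->a;
	} while (read_seqretry(&f->lock, seq));

	return a;
}

Without write_seqlock_begin/end() this pattern needs an open-coded
spinlock next to a separate seqcount, which is exactly what the patch
avoids.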