From: Boqun Feng <boqun.feng@xxxxxxxxx>

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
Signed-off-by: Lyude Paul <lyude@xxxxxxxxxx>
---
 arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
 arch/s390/include/asm/preempt.h  | 19 +++++++++++++++++++
 arch/x86/include/asm/preempt.h   | 10 ++++++++++
 include/asm-generic/preempt.h    | 14 ++++++++++++++
 4 files changed, 61 insertions(+)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f0..49cb886c8e1dd 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -56,6 +56,24 @@ static inline void __preempt_count_sub(int val)
 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
 }
 
+static inline int __preempt_count_add_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc += val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
+static inline int __preempt_count_sub_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc -= val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
 static inline bool __preempt_count_dec_and_test(void)
 {
 	struct thread_info *ti = current_thread_info();
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6ccd033acfe52..67a6e265e9fff 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -98,6 +98,25 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	/*
+	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
+	 * enabled, gcc 12 fails to handle __builtin_constant_p().
+	 */
+	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
+		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
+			return val + __atomic_add_const(val, &get_lowcore()->preempt_count);
+		}
+	}
+	return val + __atomic_add(val, &get_lowcore()->preempt_count);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return __preempt_count_add_return(-val);
+}
+
 #define init_task_preempt_count(p)	do { } while (0)
 /* Deferred to CPU bringup time */
 #define init_idle_preempt_count(p, cpu)	do { } while (0)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 919909d8cb77e..405e60f4e1a77 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -84,6 +84,16 @@ static __always_inline void __preempt_count_sub(int val)
 	raw_cpu_add_4(pcpu_hot.preempt_count, -val);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return raw_cpu_add_return_4(pcpu_hot.preempt_count, val);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return raw_cpu_add_return_4(pcpu_hot.preempt_count, -val);
+}
+
 /*
  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
  * a decrement which hits zero means we have no preempt_count and should
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 51f8f3881523a..c8683c046615d 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
 	*preempt_count_ptr() -= val;
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	*preempt_count_ptr() += val;
+
+	return *preempt_count_ptr();
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	*preempt_count_ptr() -= val;
+
+	return *preempt_count_ptr();
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	/*
-- 
2.48.1
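
[Editor's illustrative sketch, not part of the patch or the series: a minimal example of why a combined add-and-return helper is useful. The wrapper name below is hypothetical; it only shows a caller getting the updated raw preempt_count back from a single read-modify-write instead of pairing __preempt_count_add() with a separate preempt_count() read.]

/*
 * Hypothetical caller, for illustration only: disable preemption and
 * return the updated raw preempt_count value (which, on x86, also
 * carries the inverted PREEMPT_NEED_RESCHED bit), e.g. so the caller
 * can assert on or log the nesting it just created.
 */
static __always_inline int preempt_disable_return(void)
{
	int pc = __preempt_count_add_return(1);

	barrier();	/* keep the critical section after the count update */
	return pc;
}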