The lazy preempt count is a great mechanism for helping the throughput of ordinary
SCHED_OTHER tasks under PREEMPT_RT. However, the current implementation relies on
arch-specific code to check preempt_lazy_count in should_resched() and
__preempt_count_dec_and_test(); if an arch such as riscv uses the asm-generic
preempt implementation, it loses the lazy preempt mechanism.

Signed-off-by: Jisheng Zhang <jszhang@xxxxxxxxxx>
---
 include/asm-generic/preempt.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index b4d43a4af5f7..b583e7c38ccf 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,11 @@ static __always_inline void __preempt_count_sub(int val)
 	*preempt_count_ptr() -= val;
 }
 
+#ifdef CONFIG_PREEMPT_LAZY
+#define preempt_lazy_count()	(current_thread_info()->preempt_lazy_count)
+#else
+#define preempt_lazy_count()	(0)
+#endif
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	/*
@@ -66,7 +71,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
 	 * lost.
 	 */
-	return !--*preempt_count_ptr() && tif_need_resched();
+	return !--*preempt_count_ptr() && !preempt_lazy_count() && tif_need_resched();
 }
 
 /*
@@ -75,6 +80,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 static __always_inline bool should_resched(int preempt_offset)
 {
 	return unlikely(preempt_count() == preempt_offset &&
+			!preempt_lazy_count() &&
 			tif_need_resched());
 }
 
-- 
2.40.1
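
For anyone who wants to poke at the new condition without a PREEMPT_RT tree: below is
a small stand-alone user-space model of the generic __preempt_count_dec_and_test()
logic after this patch. It is illustration only, not kernel code; the mock_* globals
and model_dec_and_test() helper are made up to stand in for the per-thread preempt
count, the lazy preempt count and TIF_NEED_RESCHED. It just shows that a pending
need_resched does not trigger a reschedule while the lazy count is held, and does
once the lazy count drops to zero.

/*
 * User-space model of the generic __preempt_count_dec_and_test()
 * after this patch.  The mock_* globals are illustration only,
 * not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

static int mock_preempt_count = 1;	/* one preempt_disable() held */
static int mock_lazy_count = 1;		/* inside a lazy-preempt region */
static bool mock_need_resched = true;	/* TIF_NEED_RESCHED pending */

static bool model_dec_and_test(void)
{
	/* mirrors: !--*preempt_count_ptr() && !preempt_lazy_count() && tif_need_resched() */
	return !--mock_preempt_count && !mock_lazy_count && mock_need_resched;
}

int main(void)
{
	/* lazy count held: the final preempt_enable() must not reschedule */
	printf("reschedule: %s\n", model_dec_and_test() ? "yes" : "no");

	/* lazy count released: the pending resched is honoured */
	mock_preempt_count = 1;
	mock_lazy_count = 0;
	printf("reschedule: %s\n", model_dec_and_test() ? "yes" : "no");
	return 0;
}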