On preempt_model_none() or preempt_model_voluntary() configurations,
rescheduling of kernel threads happens only when they allow it, and
only at explicit preemption points, via calls to cond_resched() or
similar.

That leaves out contexts where it is not convenient to periodically
call cond_resched() -- for instance, when executing a potentially
long-running primitive (such as REP; STOSB).

This means that we either suffer high scheduling latency or have to
avoid such constructs altogether.

Define TIF_RESCHED_ALLOW to demarcate such sections.

Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
 arch/x86/include/asm/thread_info.h |  2 ++
 include/linux/sched.h              | 30 ++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index d63b02940747..fc6f4121b412 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -100,6 +100,7 @@ struct thread_info {
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
+#define TIF_RESCHED_ALLOW	30	/* reschedule if needed */
 
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -122,6 +123,7 @@ struct thread_info {
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
+#define _TIF_RESCHED_ALLOW	(1 << TIF_RESCHED_ALLOW)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW_BASE	\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 177b3f3676ef..4dd3d91d990f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2245,6 +2245,36 @@ static __always_inline bool need_resched(void)
 	return unlikely(tif_need_resched());
 }
 
+#ifdef TIF_RESCHED_ALLOW
+/*
+ * allow_resched() .. disallow_resched() demarcate a preemptible section.
+ *
+ * Used around primitives where it might not be convenient to periodically
+ * call cond_resched().
+ */
+static inline void allow_resched(void)
+{
+	might_sleep();
+	set_tsk_thread_flag(current, TIF_RESCHED_ALLOW);
+}
+
+static inline void disallow_resched(void)
+{
+	clear_tsk_thread_flag(current, TIF_RESCHED_ALLOW);
+}
+
+static __always_inline bool resched_allowed(void)
+{
+	return unlikely(test_tsk_thread_flag(current, TIF_RESCHED_ALLOW));
+}
+
+#else
+static __always_inline bool resched_allowed(void)
+{
+	return false;
+}
+#endif /* TIF_RESCHED_ALLOW */
+
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
  */
-- 
2.31.1
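
P.S. For illustration, a minimal usage sketch of the intended API.
Note that memzero_rep_stosb() is a hypothetical stand-in for a
long-running REP; STOSB style primitive; it is not part of this patch:

	#include <linux/sched.h>

	/*
	 * Sketch only: bracket a long-running primitive that offers no
	 * convenient point to call cond_resched(). With the section
	 * marked via TIF_RESCHED_ALLOW, the scheduler may reschedule
	 * the task while the primitive runs instead of waiting for an
	 * explicit preemption point.
	 */
	static void clear_region(void *addr, unsigned long len)
	{
		allow_resched();		/* preemptible section begins */
		memzero_rep_stosb(addr, len);	/* hypothetical REP; STOSB helper */
		disallow_resched();		/* back to explicit preemption points */
	}

Since allow_resched() calls might_sleep(), opening such a section from
atomic context gets flagged under CONFIG_DEBUG_ATOMIC_SLEEP.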