The following commit has been merged into the sched/core branch of tip:

Commit-ID:     cfe43f478b79ba45573ca22d52d0d8823be068fa
Gitweb:        https://git.kernel.org/tip/cfe43f478b79ba45573ca22d52d0d8823be068fa
Author:        Valentin Schneider <valentin.schneider@xxxxxxx>
AuthorDate:    Fri, 12 Nov 2021 18:52:01
Committer:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Tue, 05 Apr 2022 10:24:42 +02:00

preempt/dynamic: Introduce preemption model accessors

CONFIG_PREEMPT{_NONE, _VOLUNTARY} designate either:

o The build-time preemption model when !PREEMPT_DYNAMIC
o The default boot-time preemption model when PREEMPT_DYNAMIC

IOW, using those on PREEMPT_DYNAMIC kernels is meaningless - the actual
model could have been set to something else by the "preempt=foo" cmdline
parameter.

Same problem applies to CONFIG_PREEMPTION.

Introduce a set of helpers to determine the actual preemption model used
by the live kernel.

Suggested-by: Marco Elver <elver@xxxxxxxxxx>
Signed-off-by: Valentin Schneider <valentin.schneider@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Marco Elver <elver@xxxxxxxxxx>
Acked-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20211112185203.280040-3-valentin.schneider@xxxxxxx
---
 include/linux/sched.h | 41 +++++++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c   | 12 ++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5e3c00..67f06f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2117,6 +2117,47 @@ static inline void cond_resched_rcu(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+	return preempt_model_full() || preempt_model_rt();
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d575b49..068c088 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8409,6 +8409,18 @@ static void __init preempt_dynamic_init(void)
 	}
 }
 
+#define PREEMPT_MODEL_ACCESSOR(mode) \
+	bool preempt_model_##mode(void)						 \
+	{									 \
+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
+	}									 \
+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
+
+PREEMPT_MODEL_ACCESSOR(none);
+PREEMPT_MODEL_ACCESSOR(voluntary);
+PREEMPT_MODEL_ACCESSOR(full);
+
 #else /* !CONFIG_PREEMPT_DYNAMIC */
 
 static inline void preempt_dynamic_init(void) { }
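
As a usage illustration (not part of the patch above): in-kernel code that previously branched on CONFIG_PREEMPT* can instead query the live model through the new accessors, which stays correct under PREEMPT_DYNAMIC and the "preempt=" boot parameter. The function name report_preempt_model() below is hypothetical, chosen only for this sketch; it uses nothing beyond the accessors introduced here plus pr_info().

/*
 * Hypothetical example, not part of this commit: report which preemption
 * model the running kernel actually uses, regardless of how the Kconfig
 * defaults were set at build time.
 */
#include <linux/sched.h>
#include <linux/printk.h>

static void report_preempt_model(void)
{
	/* Exactly one of these reflects the model the kernel is running with. */
	if (preempt_model_rt())
		pr_info("preemption model: PREEMPT_RT\n");
	else if (preempt_model_full())
		pr_info("preemption model: full\n");
	else if (preempt_model_voluntary())
		pr_info("preemption model: voluntary\n");
	else if (preempt_model_none())
		pr_info("preemption model: none\n");

	/* True whenever non-cooperative preemption can occur. */
	if (preempt_model_preemptible())
		pr_info("kernel may be preempted involuntarily\n");
}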