Because of the requirement that no tracing happens until after we've incremented preempt_count (see nmi_enter() / trace_rcu_enter()), mark these functions as notrace. Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx> --- kernel/sched/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3781,7 +3781,7 @@ static inline void preempt_latency_start } } -void preempt_count_add(int val) +void notrace preempt_count_add(int val) { #ifdef CONFIG_DEBUG_PREEMPT /* @@ -3813,7 +3813,7 @@ static inline void preempt_latency_stop( trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); } -void preempt_count_sub(int val) +void notrace preempt_count_sub(int val) { #ifdef CONFIG_DEBUG_PREEMPT /*