Replace the open-coded (and incomplete) RCU manipulations with the new
helpers to ensure a regular RCU context when calling into
__ftrace_trace_stack().

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/trace/trace.c | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)

--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2989,24 +2989,11 @@ void __trace_stack(struct trace_array *t
 		 int pc)
 {
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
+	unsigned long rcu_flags;
 
-	if (rcu_is_watching()) {
-		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
-		return;
-	}
-
-	/*
-	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
-	 * but if the above rcu_is_watching() failed, then the NMI
-	 * triggered someplace critical, and rcu_irq_enter() should
-	 * not be called from NMI.
-	 */
-	if (unlikely(in_nmi()))
-		return;
-
-	rcu_irq_enter_irqsave();
+	rcu_flags = trace_rcu_enter();
 	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
-	rcu_irq_exit_irqsave();
+	trace_rcu_exit(rcu_flags);
 }
 
 /**
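
trace_rcu_enter()/trace_rcu_exit() are introduced elsewhere in this series,
so their definitions are not part of this hunk. Purely as a rough sketch of
the intended shape, assuming they do little more than factor out the
open-coded pattern removed above (the real helpers may well differ, notably
in how they treat an NMI hitting a critical section, which the removed code
simply bailed out of; the rcu_flags bit used below is only an illustration
of how the enter side could tell the exit side whether anything needs
undoing):

#include <linux/rcupdate.h>	/* rcu_is_watching() */

/*
 * Illustrative sketch, not the actual helpers from this series.
 * Assumed behaviour: enter a regular RCU context if this CPU is not
 * already in one, and have the paired exit undo exactly that.
 * rcu_irq_enter_irqsave()/rcu_irq_exit_irqsave() are the same calls
 * used by the code removed above.
 */
static inline unsigned long trace_rcu_enter(void)
{
	unsigned long rcu_flags = 0;

	/* Already in a proper RCU context: nothing to do. */
	if (rcu_is_watching())
		return rcu_flags;

	rcu_irq_enter_irqsave();
	return rcu_flags | 1;	/* remember that we entered */
}

static inline void trace_rcu_exit(unsigned long rcu_flags)
{
	/* Leave the RCU context only if trace_rcu_enter() entered one. */
	if (rcu_flags & 1)
		rcu_irq_exit_irqsave();
}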