The trigger_single_cpu_backtrace() function uses an NMI to dump the
stack trace of another CPU, so it should really be one of the ways that
dump_cpu_task() is implemented. Therefore, try it first in
dump_cpu_task(). This also eliminates the duplicated fallback code in
the upper-layer callers.

There is also a call to dump_cpu_task() in
synchronize_rcu_expedited_wait() that should likewise try the NMI-based
dump first. It already gets that behavior as a result of this change,
so it is left unchanged.

Signed-off-by: Zhen Lei <thunder.leizhen@xxxxxxxxxx>
---
 kernel/rcu/tree_stall.h | 8 +++-----
 kernel/sched/core.c     | 3 +++
 kernel/smp.c            | 3 +--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index a001e1e7a99269c..80749d257ac2f78 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -368,7 +368,7 @@ static void rcu_dump_cpu_stacks(void)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 				if (cpu_is_offline(cpu))
 					pr_err("Offline CPU %d blocking current GP.\n", cpu);
-				else if (!trigger_single_cpu_backtrace(cpu))
+				else
 					dump_cpu_task(cpu);
 			}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -486,8 +486,7 @@ static void rcuc_kthread_dump(struct rcu_data *rdp)
 
 	pr_err("%s kthread starved for %ld jiffies\n", rcuc->comm, j);
 	sched_show_task(rcuc);
-	if (!trigger_single_cpu_backtrace(cpu))
-		dump_cpu_task(cpu);
+	dump_cpu_task(cpu);
 }
 
 /* Complain about starvation of grace-period kthread. */
@@ -515,8 +514,7 @@ static void rcu_check_gp_kthread_starvation(void)
 				pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
 			} else {
 				pr_err("Stack dump where RCU GP kthread last ran:\n");
-				if (!trigger_single_cpu_backtrace(cpu))
-					dump_cpu_task(cpu);
+				dump_cpu_task(cpu);
 			}
 		}
 		wake_up_process(gpk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df8fe433642fa30..0e82073020bf0d1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11145,6 +11145,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 
 void dump_cpu_task(int cpu)
 {
+	if (trigger_single_cpu_backtrace(cpu))
+		return;
+
 	pr_info("Task dump for CPU %d:\n", cpu);
 	sched_show_task(cpu_curr(cpu));
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index dd215f439426449..56ca958364aebeb 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -370,8 +370,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
 	if (cpu >= 0) {
 		if (static_branch_unlikely(&csdlock_debug_extended))
 			csd_lock_print_extended(csd, cpu);
-		if (!trigger_single_cpu_backtrace(cpu))
-			dump_cpu_task(cpu);
+		dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
 			arch_send_call_function_single_ipi(cpu);
-- 
2.25.1
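
Not part of the patch, just a minimal before/after sketch of the
call-site pattern this change consolidates. The helper names
dump_cpu_old_style()/dump_cpu_new_style() are hypothetical, and the
includes assume the usual declarations of trigger_single_cpu_backtrace()
in <linux/nmi.h> and dump_cpu_task() in <linux/sched/debug.h>:

#include <linux/nmi.h>		/* trigger_single_cpu_backtrace() */
#include <linux/sched/debug.h>	/* dump_cpu_task() */

/*
 * Old pattern, duplicated at each call site (kernel/rcu/tree_stall.h,
 * kernel/smp.c): try the NMI-based backtrace first, and fall back to
 * dumping the remote CPU's stack from this CPU only if that fails.
 */
static void dump_cpu_old_style(int cpu)
{
	if (!trigger_single_cpu_backtrace(cpu))
		dump_cpu_task(cpu);
}

/*
 * New pattern: dump_cpu_task() now makes the NMI-first attempt itself,
 * so each caller shrinks to a single line.
 */
static void dump_cpu_new_style(int cpu)
{
	dump_cpu_task(cpu);
}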